diff --git a/Dockerfile.dapper b/Dockerfile.dapper
index 9d2da185525b..1feb56b70b20 100644
--- a/Dockerfile.dapper
+++ b/Dockerfile.dapper
@@ -27,14 +27,6 @@ RUN if [ "${ARCH}" = 'amd64' ]; then \
ARG SELINUX=true
ENV SELINUX $SELINUX
-ARG DQLITE=true
-ENV DQLITE $DQLITE
-COPY --from=rancher/dqlite-build:v1.4.1-r1 /dist/artifacts /usr/src/
-RUN if [ "$DQLITE" = true ]; then \
- tar xzf /usr/src/dqlite.tgz -C / && \
- apk add --allow-untrusted /usr/local/packages/*.apk \
- ;fi
-
ENV GO111MODULE off
ENV DAPPER_RUN_ARGS --privileged -v k3s-cache:/go/src/github.com/rancher/k3s/.cache
ENV DAPPER_ENV REPO TAG DRONE_TAG IMAGE_NAME SKIP_VALIDATE GCLOUD_AUTH
diff --git a/cmd/k3s/main.go b/cmd/k3s/main.go
index 016ec4bac7d5..ec97ae648d87 100644
--- a/cmd/k3s/main.go
+++ b/cmd/k3s/main.go
@@ -13,6 +13,7 @@ import (
"github.com/rancher/k3s/pkg/data"
"github.com/rancher/k3s/pkg/datadir"
"github.com/rancher/k3s/pkg/untar"
+ "github.com/rancher/k3s/pkg/version"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@@ -24,8 +25,8 @@ func main() {
app := cmds.NewApp()
app.Commands = []cli.Command{
- cmds.NewServerCommand(wrap("k3s-server", os.Args)),
- cmds.NewAgentCommand(wrap("k3s-agent", os.Args)),
+ cmds.NewServerCommand(wrap(version.Program+"-server", os.Args)),
+ cmds.NewAgentCommand(wrap(version.Program+"-agent", os.Args)),
cmds.NewKubectlCommand(externalCLIAction("kubectl")),
cmds.NewCRICTL(externalCLIAction("crictl")),
cmds.NewCtrCommand(externalCLIAction("ctr")),
@@ -88,7 +89,7 @@ func stageAndRun(dataDir string, cmd string, args []string) error {
if err := os.Setenv("PATH", filepath.Join(dir, "bin")+":"+os.Getenv("PATH")+":"+filepath.Join(dir, "bin/aux")); err != nil {
return err
}
- if err := os.Setenv("K3S_DATA_DIR", dir); err != nil {
+ if err := os.Setenv(version.ProgramUpper+"_DATA_DIR", dir); err != nil {
return err
}
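For context on the rebranding mechanism used throughout this change: a minimal sketch of what the new pkg/version package presumably exposes, inferred from the call sites above. The field set and defaults are assumptions; in practice the values would be overridden at build time via -ldflags "-X".

    // hypothetical pkg/version, reconstructed from its usage in this diff
    package version

    import "strings"

    var (
        Program      = "k3s"                    // e.g. -ldflags "-X .../pkg/version.Program=k3s"
        ProgramUpper = strings.ToUpper(Program) // drives env var names such as K3S_TOKEN
        Version      = "dev"
    )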
diff --git a/go.mod b/go.mod
index 14e140908a15..d97d56af8696 100644
--- a/go.mod
+++ b/go.mod
@@ -66,7 +66,6 @@ require (
github.com/bhendo/go-powershell v0.0.0-20190719160123-219e7fb4e41e // indirect
github.com/bronze1man/goStrongswanVici v0.0.0-20190828090544-27d02f80ba40 // indirect
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23 // indirect
- github.com/canonical/go-dqlite v1.5.1
github.com/containerd/cgroups v0.0.0-00010101000000-000000000000 // indirect
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 // indirect
@@ -81,18 +80,17 @@ require (
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
github.com/docker/docker v1.4.2-0.20191205034852-d163fbba3c82
github.com/docker/go-metrics v0.0.1 // indirect
- github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4 // indirect
github.com/go-bindata/go-bindata v3.1.2+incompatible
github.com/go-sql-driver/mysql v1.4.1
github.com/gogo/googleapis v1.3.0 // indirect
github.com/google/tcpproxy v0.0.0-20180808230851-dfa16c61dad2
+ github.com/google/uuid v1.1.1
github.com/gorilla/mux v1.7.3
github.com/gorilla/websocket v1.4.1
github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d // indirect
github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2 // indirect
github.com/kubernetes-sigs/cri-tools v0.0.0-00010101000000-000000000000
github.com/lib/pq v1.1.1
- github.com/lxc/lxd v0.0.0-20191108214106-60ea15630455
github.com/mattn/go-sqlite3 v1.13.0
github.com/natefinch/lumberjack v2.0.0+incompatible
github.com/opencontainers/runc v1.0.0-rc10
@@ -110,12 +108,13 @@ require (
github.com/spf13/pflag v1.0.5
github.com/tchap/go-patricia v2.3.0+incompatible // indirect
github.com/urfave/cli v1.22.2
+ // 54ba958 is v3.4.9
+ go.etcd.io/etcd v0.5.0-alpha.5.0.20200520232829-54ba9589114f
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975
golang.org/x/net v0.0.0-20191204025024-5ee1b9f4859a
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e
google.golang.org/grpc v1.26.0
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect
- gopkg.in/robfig/cron.v2 v2.0.0-20150107220207-be2e0b0deed5 // indirect
gopkg.in/yaml.v2 v2.2.8
k8s.io/api v0.18.0
k8s.io/apimachinery v0.18.0
@@ -126,4 +125,5 @@ require (
k8s.io/cri-api v0.0.0
k8s.io/klog v1.0.0
k8s.io/kubernetes v1.18.0
+ sigs.k8s.io/yaml v1.2.0
)
diff --git a/go.sum b/go.sum
index 4c18fa86f0fd..564b5ddf4714 100644
--- a/go.sum
+++ b/go.sum
@@ -207,8 +207,6 @@ github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4 h1:GY1+t5Dr9OKADM64SYnQjw/w99HMYvQ0A8/JoUkxVmc=
-github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
@@ -223,8 +221,6 @@ github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG
github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
github.com/go-bindata/go-bindata v3.1.2+incompatible h1:5vjJMVhowQdPzjE1LdxyFF7YFTXg5IgGVW4gBr5IbvE=
github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
-github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=
-github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA=
github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
@@ -446,13 +442,10 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/juju/errors v0.0.0-20180806074554-22422dad46e1/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
-github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d h1:hJXjZMxj0SWlMoQkzeZDLi2cmeiWKa7y1B8Rg+qaoEc=
github.com/juju/errors v0.0.0-20190806202954-0232dcc7464d/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
-github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI=
github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
-github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
github.com/juju/testing v0.0.0-20190613124551-e81189438503/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2 h1:Pp8RxiF4rSoXP9SED26WCfNB28/dwTDpPXS8XMJR8rc=
github.com/juju/testing v0.0.0-20190723135506-ce30eb24acd2/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
@@ -494,8 +487,6 @@ github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH
github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk=
github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao=
github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58=
-github.com/lxc/lxd v0.0.0-20191108214106-60ea15630455 h1:gQQV7It0kjZxMLJkS/+5Mc6w0zM6pKGzl3OS0h2RHrY=
-github.com/lxc/lxd v0.0.0-20191108214106-60ea15630455/go.mod h1:2BaZflfwsv8a3uy3/Vw+de4Avn4DSrAiqaHJjCIXMV4=
github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -814,6 +805,8 @@ go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200520232829-54ba9589114f h1:pBCD+Z7cy5WPTq+R6MmJJvDRpn88cp7bmTypBsn91g4=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200520232829-54ba9589114f/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA=
@@ -948,7 +941,6 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1027,8 +1019,6 @@ gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3M
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/robfig/cron.v2 v2.0.0-20150107220207-be2e0b0deed5 h1:E846t8CnR+lv5nE+VuiKTDG/v1U2stad0QzddfJC7kY=
-gopkg.in/robfig/cron.v2 v2.0.0-20150107220207-be2e0b0deed5/go.mod h1:hiOFpYm0ZJbusNj2ywpbrXowU3G8U6GIQzqn2mw1UIE=
gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
diff --git a/pkg/agent/config/config.go b/pkg/agent/config/config.go
index 2ee0c3b6cbaa..93a943f2f530 100644
--- a/pkg/agent/config/config.go
+++ b/pkg/agent/config/config.go
@@ -25,6 +25,7 @@ import (
"github.com/rancher/k3s/pkg/clientaccess"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/daemons/control"
+ "github.com/rancher/k3s/pkg/version"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/net"
@@ -73,12 +74,12 @@ func getNodeNamedCrt(nodeName, nodePasswordFile string) HTTPRequester {
req.SetBasicAuth(username, password)
}
- req.Header.Set("K3s-Node-Name", nodeName)
+ req.Header.Set(version.Program+"-Node-Name", nodeName)
nodePassword, err := ensureNodePassword(nodePasswordFile)
if err != nil {
return nil, err
}
- req.Header.Set("K3s-Node-Password", nodePassword)
+ req.Header.Set(version.Program+"-Node-Password", nodePassword)
resp, err := client.Do(req)
if err != nil {
@@ -142,7 +143,7 @@ func upgradeOldNodePasswordPath(oldNodePasswordFile, newNodePasswordFile string)
}
func getServingCert(nodeName, servingCertFile, servingKeyFile, nodePasswordFile string, info *clientaccess.Info) (*tls.Certificate, error) {
- servingCert, err := Request("/v1-k3s/serving-kubelet.crt", info, getNodeNamedCrt(nodeName, nodePasswordFile))
+ servingCert, err := Request("/v1-"+version.Program+"/serving-kubelet.crt", info, getNodeNamedCrt(nodeName, nodePasswordFile))
if err != nil {
return nil, err
}
@@ -166,7 +167,7 @@ func getServingCert(nodeName, servingCertFile, servingKeyFile, nodePasswordFile
func getHostFile(filename, keyFile string, info *clientaccess.Info) error {
basename := filepath.Base(filename)
- fileBytes, err := clientaccess.Get("/v1-k3s/"+basename, info)
+ fileBytes, err := clientaccess.Get("/v1-"+version.Program+"/"+basename, info)
if err != nil {
return err
}
@@ -206,7 +207,7 @@ func splitCertKeyPEM(bytes []byte) (certPem []byte, keyPem []byte) {
func getNodeNamedHostFile(filename, keyFile, nodeName, nodePasswordFile string, info *clientaccess.Info) error {
basename := filepath.Base(filename)
- fileBytes, err := Request("/v1-k3s/"+basename, info, getNodeNamedCrt(nodeName, nodePasswordFile))
+ fileBytes, err := Request("/v1-"+version.Program+"/"+basename, info, getNodeNamedCrt(nodeName, nodePasswordFile))
if err != nil {
return err
}
@@ -282,7 +283,7 @@ func locateOrGenerateResolvConf(envInfo *cmds.Agent) string {
}
}
- tmpConf := filepath.Join(os.TempDir(), "k3s-resolv.conf")
+ tmpConf := filepath.Join(os.TempDir(), version.Program+"-resolv.conf")
if err := ioutil.WriteFile(tmpConf, []byte("nameserver 8.8.8.8\n"), 0444); err != nil {
logrus.Error(err)
return ""
@@ -385,13 +386,13 @@ func get(envInfo *cmds.Agent, proxy proxy.Proxy) (*config.Node, error) {
return nil, err
}
- clientK3sControllerCert := filepath.Join(envInfo.DataDir, "client-k3s-controller.crt")
- clientK3sControllerKey := filepath.Join(envInfo.DataDir, "client-k3s-controller.key")
+ clientK3sControllerCert := filepath.Join(envInfo.DataDir, "client-"+version.Program+"-controller.crt")
+ clientK3sControllerKey := filepath.Join(envInfo.DataDir, "client-"+version.Program+"-controller.key")
if err := getHostFile(clientK3sControllerCert, clientK3sControllerKey, info); err != nil {
return nil, err
}
- kubeconfigK3sController := filepath.Join(envInfo.DataDir, "k3scontroller.kubeconfig")
+ kubeconfigK3sController := filepath.Join(envInfo.DataDir, version.Program+"controller.kubeconfig")
if err := control.KubeConfig(kubeconfigK3sController, proxy.APIServerURL(), serverCAFile, clientK3sControllerCert, clientK3sControllerKey); err != nil {
return nil, err
}
@@ -488,7 +489,7 @@ func get(envInfo *cmds.Agent, proxy proxy.Proxy) (*config.Node, error) {
}
func getConfig(info *clientaccess.Info) (*config.Control, error) {
- data, err := clientaccess.Get("/v1-k3s/config", info)
+ data, err := clientaccess.Get("/v1-"+version.Program+"/config", info)
if err != nil {
return nil, err
}
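The net effect of these hunks on the wire: node identity now travels in headers and URL paths derived from the program name. A hedged sketch of the request getNodeNamedCrt builds — the endpoint and header names come from the hunks above; the host and values are placeholders:

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        program := "k3s" // version.Program in the real code
        req, err := http.NewRequest(http.MethodGet,
            "https://server:6443/v1-"+program+"/serving-kubelet.crt", nil)
        if err != nil {
            panic(err)
        }
        // header names match the diff; the values here are placeholders
        req.Header.Set(program+"-Node-Name", "node-1")
        req.Header.Set(program+"-Node-Password", "<node password>")
        fmt.Println(req.URL.Path, req.Header)
    }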
diff --git a/pkg/agent/containerd/containerd.go b/pkg/agent/containerd/containerd.go
index 512f0dde7dbb..b7bfd94467a1 100644
--- a/pkg/agent/containerd/containerd.go
+++ b/pkg/agent/containerd/containerd.go
@@ -20,6 +20,7 @@ import (
"github.com/rancher/k3s/pkg/agent/templates"
util2 "github.com/rancher/k3s/pkg/agent/util"
"github.com/rancher/k3s/pkg/daemons/config"
+ "github.com/rancher/k3s/pkg/version"
"github.com/sirupsen/logrus"
"google.golang.org/grpc"
yaml "gopkg.in/yaml.v2"
@@ -233,7 +234,7 @@ func setupContainerdConfig(ctx context.Context, cfg *config.Node) error {
containerdConfig.SELinuxEnabled = selEnabled
}
if containerdConfig.SELinuxEnabled && !selConfigured {
- logrus.Warnf("SELinux is enabled for k3s but process is not running in context '%s', k3s-selinux policy may need to be applied", SELinuxContextType)
+ logrus.Warnf("SELinux is enabled for "+version.Program+" but process is not running in context '%s', "+version.Program+"-selinux policy may need to be applied", SELinuxContextType)
}
containerdTemplateBytes, err := ioutil.ReadFile(cfg.Containerd.Template)
diff --git a/pkg/agent/flannel/setup.go b/pkg/agent/flannel/setup.go
index 7a5fb1d8503d..f6bac73cc4cf 100644
--- a/pkg/agent/flannel/setup.go
+++ b/pkg/agent/flannel/setup.go
@@ -10,6 +10,7 @@ import (
"github.com/rancher/k3s/pkg/agent/util"
"github.com/rancher/k3s/pkg/daemons/config"
+ "github.com/rancher/k3s/pkg/version"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -142,7 +143,7 @@ func createFlannelConf(nodeConfig *config.Node) error {
func setupStrongSwan(nodeConfig *config.Node) error {
// if data dir env is not set point to root
- dataDir := os.Getenv("K3S_DATA_DIR")
+ dataDir := os.Getenv(version.ProgramUpper + "_DATA_DIR")
if dataDir == "" {
dataDir = "/"
}
diff --git a/pkg/agent/loadbalancer/loadbalancer.go b/pkg/agent/loadbalancer/loadbalancer.go
index d5a8368c4712..134e38a4c21e 100644
--- a/pkg/agent/loadbalancer/loadbalancer.go
+++ b/pkg/agent/loadbalancer/loadbalancer.go
@@ -8,6 +8,7 @@ import (
"sync"
"github.com/google/tcpproxy"
+ "github.com/rancher/k3s/pkg/version"
"github.com/sirupsen/logrus"
)
@@ -27,9 +28,9 @@ type LoadBalancer struct {
nextServerIndex int
}
-const (
- SupervisorServiceName = "k3s-agent-load-balancer"
- APIServerServiceName = "k3s-api-server-agent-load-balancer"
+var (
+ SupervisorServiceName = version.Program + "-agent-load-balancer"
+ APIServerServiceName = version.Program + "-api-server-agent-load-balancer"
)
func New(dataDir, serviceName, serverURL string) (_lb *LoadBalancer, _err error) {
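The const-to-var switch here (and in pkg/agent/run.go and pkg/cloudprovider/instances.go below) is forced, not stylistic: Go constant initializers must be compile-time constant expressions, and version.Program is a variable. A minimal illustration:

    package main

    import "fmt"

    var program = "k3s" // a variable, like version.Program

    // const SupervisorServiceName = program + "-agent-load-balancer"
    // ^ does not compile: initializer is not a constant expression

    var SupervisorServiceName = program + "-agent-load-balancer" // fine as a var

    func main() {
        fmt.Println(SupervisorServiceName)
    }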
diff --git a/pkg/agent/run.go b/pkg/agent/run.go
index 5c1d2822064e..aa287e29dada 100644
--- a/pkg/agent/run.go
+++ b/pkg/agent/run.go
@@ -23,6 +23,7 @@ import (
daemonconfig "github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/nodeconfig"
"github.com/rancher/k3s/pkg/rootless"
+ "github.com/rancher/k3s/pkg/version"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -32,10 +33,10 @@ import (
"k8s.io/client-go/tools/clientcmd"
)
-const (
- InternalIPLabel = "k3s.io/internal-ip"
- ExternalIPLabel = "k3s.io/external-ip"
- HostnameLabel = "k3s.io/hostname"
+var (
+ InternalIPLabel = version.Program + ".io/internal-ip"
+ ExternalIPLabel = version.Program + ".io/external-ip"
+ HostnameLabel = version.Program + ".io/hostname"
)
func run(ctx context.Context, cfg cmds.Agent, proxy proxy.Proxy) error {
diff --git a/pkg/agent/tunnel/tunnel.go b/pkg/agent/tunnel/tunnel.go
index c88c8ce9a95b..d25f4a05aaad 100644
--- a/pkg/agent/tunnel/tunnel.go
+++ b/pkg/agent/tunnel/tunnel.go
@@ -13,6 +13,7 @@ import (
"github.com/gorilla/websocket"
"github.com/rancher/k3s/pkg/agent/proxy"
"github.com/rancher/k3s/pkg/daemons/config"
+ "github.com/rancher/k3s/pkg/version"
"github.com/rancher/remotedialer"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
@@ -159,7 +160,7 @@ func Setup(ctx context.Context, config *config.Node, proxy proxy.Proxy) error {
}
func connect(rootCtx context.Context, waitGroup *sync.WaitGroup, address string, tlsConfig *tls.Config) context.CancelFunc {
- wsURL := fmt.Sprintf("wss://%s/v1-k3s/connect", address)
+ wsURL := fmt.Sprintf("wss://%s/v1-"+version.Program+"/connect", address)
ws := &websocket.Dialer{
TLSClientConfig: tlsConfig,
}
diff --git a/pkg/cli/agent/agent.go b/pkg/cli/agent/agent.go
index f39afda33e35..0d6c48cacabf 100644
--- a/pkg/cli/agent/agent.go
+++ b/pkg/cli/agent/agent.go
@@ -11,6 +11,7 @@ import (
"github.com/rancher/k3s/pkg/datadir"
"github.com/rancher/k3s/pkg/netutil"
"github.com/rancher/k3s/pkg/token"
+ "github.com/rancher/k3s/pkg/version"
"github.com/rancher/wrangler/pkg/signals"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
@@ -48,7 +49,7 @@ func Run(ctx *cli.Context) error {
cmds.AgentConfig.NodeIP = netutil.GetIPFromInterface(cmds.AgentConfig.FlannelIface)
}
- logrus.Infof("Starting k3s agent %s", ctx.App.Version)
+ logrus.Infof("Starting "+version.Program+" agent %s", ctx.App.Version)
dataDir, err := datadir.LocalHome(cmds.AgentConfig.DataDir, cmds.AgentConfig.Rootless)
if err != nil {
diff --git a/pkg/cli/cmds/agent.go b/pkg/cli/cmds/agent.go
index 3a3dbdb642c4..86fb35f3de16 100644
--- a/pkg/cli/cmds/agent.go
+++ b/pkg/cli/cmds/agent.go
@@ -4,6 +4,7 @@ import (
"os"
"path/filepath"
+ "github.com/rancher/k3s/pkg/version"
"github.com/urfave/cli"
)
@@ -57,7 +58,7 @@ var (
NodeNameFlag = cli.StringFlag{
Name: "node-name",
Usage: "(agent/node) Node name",
- EnvVar: "K3S_NODE_NAME",
+ EnvVar: version.ProgramUpper + "_NODE_NAME",
Destination: &AgentConfig.NodeName,
}
WithNodeIDFlag = cli.BoolFlag{
@@ -79,7 +80,7 @@ var (
Name: "private-registry",
Usage: "(agent/runtime) Private registry configuration file",
Destination: &AgentConfig.PrivateRegistry,
- Value: "/etc/rancher/k3s/registries.yaml",
+ Value: "/etc/rancher/" + version.Program + "/registries.yaml",
}
PauseImageFlag = cli.StringFlag{
Name: "pause-image",
@@ -105,7 +106,7 @@ var (
ResolvConfFlag = cli.StringFlag{
Name: "resolv-conf",
Usage: "(agent/networking) Kubelet resolv.conf file",
- EnvVar: "K3S_RESOLV_CONF",
+ EnvVar: version.ProgramUpper + "_RESOLV_CONF",
Destination: &AgentConfig.ResolvConf,
}
ExtraKubeletArgs = cli.StringSliceFlag{
@@ -150,26 +151,26 @@ func NewAgentCommand(action func(ctx *cli.Context) error) cli.Command {
cli.StringFlag{
Name: "token,t",
Usage: "(cluster) Token to use for authentication",
- EnvVar: "K3S_TOKEN",
+ EnvVar: version.ProgramUpper + "_TOKEN",
Destination: &AgentConfig.Token,
},
cli.StringFlag{
Name: "token-file",
Usage: "(cluster) Token file to use for authentication",
- EnvVar: "K3S_TOKEN_FILE",
+ EnvVar: version.ProgramUpper + "_TOKEN_FILE",
Destination: &AgentConfig.TokenFile,
},
cli.StringFlag{
Name: "server,s",
Usage: "(cluster) Server to connect to",
- EnvVar: "K3S_URL",
+ EnvVar: version.ProgramUpper + "_URL",
Destination: &AgentConfig.ServerURL,
},
cli.StringFlag{
Name: "data-dir,d",
Usage: "(agent/data) Folder to hold state",
Destination: &AgentConfig.DataDir,
- Value: "/var/lib/rancher/k3s",
+ Value: "/var/lib/rancher/" + version.Program + "",
},
NodeNameFlag,
WithNodeIDFlag,
@@ -200,7 +201,7 @@ func NewAgentCommand(action func(ctx *cli.Context) error) cli.Command {
Name: "cluster-secret",
Usage: "(deprecated) use --token",
Destination: &AgentConfig.ClusterSecret,
- EnvVar: "K3S_CLUSTER_SECRET",
+ EnvVar: version.ProgramUpper + "_CLUSTER_SECRET",
},
},
}
diff --git a/pkg/cli/cmds/log.go b/pkg/cli/cmds/log.go
index f18f6f08bf9a..44f6bd8d01f1 100644
--- a/pkg/cli/cmds/log.go
+++ b/pkg/cli/cmds/log.go
@@ -11,6 +11,7 @@ import (
"github.com/docker/docker/pkg/reexec"
"github.com/natefinch/lumberjack"
+ "github.com/rancher/k3s/pkg/version"
"github.com/urfave/cli"
)
@@ -90,7 +91,7 @@ func runWithLogging() error {
l = io.MultiWriter(l, os.Stderr)
}
- args := append([]string{"k3s"}, os.Args[1:]...)
+ args := append([]string{version.Program}, os.Args[1:]...)
cmd := reexec.Command(args...)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, "_K3S_LOG_REEXEC_=true")
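For readers unfamiliar with the pattern: reexec.Command re-executes the current binary and dispatches on argv[0], which is why runWithLogging must now prepend version.Program instead of the literal "k3s". A self-contained sketch of the mechanism (the registered name here is illustrative):

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/docker/pkg/reexec"
    )

    func init() {
        // reexec dispatches on argv[0]; the name must match what the parent passes
        reexec.Register("demo", func() {
            fmt.Println("running in the re-executed child")
        })
    }

    func main() {
        if reexec.Init() { // true when this process was started via reexec.Command
            return
        }
        cmd := reexec.Command("demo") // same binary, argv[0] set to "demo"
        cmd.Stdout = os.Stdout
        if err := cmd.Run(); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }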
diff --git a/pkg/cli/cmds/root.go b/pkg/cli/cmds/root.go
index 387f79882a61..212ec13075af 100644
--- a/pkg/cli/cmds/root.go
+++ b/pkg/cli/cmds/root.go
@@ -33,7 +33,7 @@ func NewApp() *cli.App {
Name: "debug",
Usage: "Turn on debug logs",
Destination: &Debug,
- EnvVar: "K3S_DEBUG",
+ EnvVar: version.ProgramUpper + "_DEBUG",
},
}
diff --git a/pkg/cli/cmds/server.go b/pkg/cli/cmds/server.go
index e4095070b404..ccb7974b76ed 100644
--- a/pkg/cli/cmds/server.go
+++ b/pkg/cli/cmds/server.go
@@ -1,6 +1,7 @@
package cmds
import (
+ "github.com/rancher/k3s/pkg/version"
"github.com/urfave/cli"
)
@@ -69,7 +70,7 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
AlsoLogToStderr,
cli.StringFlag{
Name: "bind-address",
- Usage: "(listener) k3s bind address (default: 0.0.0.0)",
+ Usage: "(listener) " + version.Program + " bind address (default: 0.0.0.0)",
Destination: &ServerConfig.BindAddress,
},
cli.IntFlag{
@@ -95,7 +96,7 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
},
cli.StringFlag{
Name: "data-dir,d",
- Usage: "(data) Folder to hold state default /var/lib/rancher/k3s or ${HOME}/.rancher/k3s if not root",
+ Usage: "(data) Folder to hold state default /var/lib/rancher/" + version.Program + " or ${HOME}/.rancher/" + version.Program + " if not root",
Destination: &ServerConfig.DataDir,
},
cli.StringFlag{
@@ -132,25 +133,25 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
Name: "token,t",
Usage: "(cluster) Shared secret used to join a server or agent to a cluster",
Destination: &ServerConfig.Token,
- EnvVar: "K3S_TOKEN",
+ EnvVar: version.ProgramUpper + "_TOKEN",
},
cli.StringFlag{
Name: "token-file",
Usage: "(cluster) File containing the cluster-secret/token",
Destination: &ServerConfig.TokenFile,
- EnvVar: "K3S_TOKEN_FILE",
+ EnvVar: version.ProgramUpper + "_TOKEN_FILE",
},
cli.StringFlag{
Name: "write-kubeconfig,o",
Usage: "(client) Write kubeconfig for admin client to this file",
Destination: &ServerConfig.KubeConfigOutput,
- EnvVar: "K3S_KUBECONFIG_OUTPUT",
+ EnvVar: version.ProgramUpper + "_KUBECONFIG_OUTPUT",
},
cli.StringFlag{
Name: "write-kubeconfig-mode",
Usage: "(client) Write kubeconfig with this mode",
Destination: &ServerConfig.KubeConfigMode,
- EnvVar: "K3S_KUBECONFIG_MODE",
+ EnvVar: version.ProgramUpper + "_KUBECONFIG_MODE",
},
cli.StringSliceFlag{
Name: "kube-apiserver-arg",
@@ -176,25 +177,25 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
Name: "datastore-endpoint",
Usage: "(db) Specify etcd, Mysql, Postgres, or Sqlite (default) data source name",
Destination: &ServerConfig.DatastoreEndpoint,
- EnvVar: "K3S_DATASTORE_ENDPOINT",
+ EnvVar: version.ProgramUpper + "_DATASTORE_ENDPOINT",
},
cli.StringFlag{
Name: "datastore-cafile",
Usage: "(db) TLS Certificate Authority file used to secure datastore backend communication",
Destination: &ServerConfig.DatastoreCAFile,
- EnvVar: "K3S_DATASTORE_CAFILE",
+ EnvVar: version.ProgramUpper + "_DATASTORE_CAFILE",
},
cli.StringFlag{
Name: "datastore-certfile",
Usage: "(db) TLS certification file used to secure datastore backend communication",
Destination: &ServerConfig.DatastoreCertFile,
- EnvVar: "K3S_DATASTORE_CERTFILE",
+ EnvVar: version.ProgramUpper + "_DATASTORE_CERTFILE",
},
cli.StringFlag{
Name: "datastore-keyfile",
Usage: "(db) TLS key file used to secure datastore backend communication",
Destination: &ServerConfig.DatastoreKeyFile,
- EnvVar: "K3S_DATASTORE_KEYFILE",
+ EnvVar: version.ProgramUpper + "_DATASTORE_KEYFILE",
},
cli.StringFlag{
Name: "default-local-storage-path",
@@ -212,7 +213,7 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
},
cli.BoolFlag{
Name: "disable-cloud-controller",
- Usage: "(components) Disable k3s default cloud controller manager",
+ Usage: "(components) Disable " + version.Program + " default cloud controller manager",
Destination: &ServerConfig.DisableCCM,
},
cli.BoolFlag{
@@ -222,7 +223,7 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
},
cli.BoolFlag{
Name: "disable-network-policy",
- Usage: "(components) Disable k3s default network policy controller",
+ Usage: "(components) Disable " + version.Program + " default network policy controller",
Destination: &ServerConfig.DisableNPC,
},
NodeNameFlag,
@@ -250,32 +251,32 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
Name: "agent-token",
Usage: "(experimental/cluster) Shared secret used to join agents to the cluster, but not servers",
Destination: &ServerConfig.AgentToken,
- EnvVar: "K3S_AGENT_TOKEN",
+ EnvVar: version.ProgramUpper + "_AGENT_TOKEN",
},
cli.StringFlag{
Name: "agent-token-file",
Usage: "(experimental/cluster) File containing the agent secret",
Destination: &ServerConfig.AgentTokenFile,
- EnvVar: "K3S_AGENT_TOKEN_FILE",
+ EnvVar: version.ProgramUpper + "_AGENT_TOKEN_FILE",
},
cli.StringFlag{
Name: "server,s",
Usage: "(experimental/cluster) Server to connect to, used to join a cluster",
- EnvVar: "K3S_URL",
+ EnvVar: version.ProgramUpper + "_URL",
Destination: &ServerConfig.ServerURL,
},
cli.BoolFlag{
Name: "cluster-init",
Hidden: hideDqlite,
Usage: "(experimental/cluster) Initialize new cluster master",
- EnvVar: "K3S_CLUSTER_INIT",
+ EnvVar: version.ProgramUpper + "_CLUSTER_INIT",
Destination: &ServerConfig.ClusterInit,
},
cli.BoolFlag{
Name: "cluster-reset",
Hidden: hideDqlite,
Usage: "(experimental/cluster) Forget all peers and become a single cluster new cluster master",
- EnvVar: "K3S_CLUSTER_RESET",
+ EnvVar: version.ProgramUpper + "_CLUSTER_RESET",
Destination: &ServerConfig.ClusterReset,
},
cli.BoolFlag{
@@ -295,7 +296,7 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
Name: "cluster-secret",
Usage: "(deprecated) use --token",
Destination: &ServerConfig.ClusterSecret,
- EnvVar: "K3S_CLUSTER_SECRET",
+ EnvVar: version.ProgramUpper + "_CLUSTER_SECRET",
},
cli.BoolFlag{
Name: "disable-agent",
diff --git a/pkg/cli/server/server.go b/pkg/cli/server/server.go
index 534f8ea6397c..296fdf74fce9 100644
--- a/pkg/cli/server/server.go
+++ b/pkg/cli/server/server.go
@@ -17,6 +17,7 @@ import (
"github.com/rancher/k3s/pkg/rootless"
"github.com/rancher/k3s/pkg/server"
"github.com/rancher/k3s/pkg/token"
+ "github.com/rancher/k3s/pkg/version"
"github.com/rancher/wrangler/pkg/signals"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
@@ -209,7 +210,7 @@ func run(app *cli.Context, cfg *cmds.Server) error {
return errors.Wrapf(err, "Invalid TLS Cipher Suites %s: %v", TLSCipherSuites, err)
}
- logrus.Info("Starting k3s ", app.App.Version)
+ logrus.Info("Starting "+version.Program+" ", app.App.Version)
notifySocket := os.Getenv("NOTIFY_SOCKET")
os.Unsetenv("NOTIFY_SOCKET")
@@ -220,7 +221,7 @@ func run(app *cli.Context, cfg *cmds.Server) error {
go func() {
<-serverConfig.ControlConfig.Runtime.APIServerReady
- logrus.Info("k3s is up and running")
+ logrus.Info("" + version.Program + " is up and running")
if notifySocket != "" {
os.Setenv("NOTIFY_SOCKET", notifySocket)
systemd.SdNotify(true, "READY=1\n")
diff --git a/pkg/cloudprovider/cloudprovider.go b/pkg/cloudprovider/cloudprovider.go
index 16e6825e1870..e36abd76a721 100644
--- a/pkg/cloudprovider/cloudprovider.go
+++ b/pkg/cloudprovider/cloudprovider.go
@@ -4,6 +4,7 @@ import (
"context"
"io"
+ "github.com/rancher/k3s/pkg/version"
"github.com/rancher/wrangler-api/pkg/generated/controllers/core"
coreclient "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/start"
@@ -15,7 +16,7 @@ type k3s struct {
}
func init() {
- cloudprovider.RegisterCloudProvider("k3s", func(config io.Reader) (cloudprovider.Interface, error) {
+ cloudprovider.RegisterCloudProvider(version.Program, func(config io.Reader) (cloudprovider.Interface, error) {
return &k3s{}, nil
})
}
@@ -49,7 +50,7 @@ func (k *k3s) Routes() (cloudprovider.Routes, bool) {
}
func (k *k3s) ProviderName() string {
- return "k3s"
+ return version.Program
}
func (k *k3s) HasClusterID() bool {
diff --git a/pkg/cloudprovider/instances.go b/pkg/cloudprovider/instances.go
index 3b340b7bea9e..0343404af6f2 100644
--- a/pkg/cloudprovider/instances.go
+++ b/pkg/cloudprovider/instances.go
@@ -4,16 +4,17 @@ import (
"context"
"fmt"
+ "github.com/rancher/k3s/pkg/version"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
cloudprovider "k8s.io/cloud-provider"
)
-const (
- InternalIPLabel = "k3s.io/internal-ip"
- ExternalIPLabel = "k3s.io/external-ip"
- HostnameLabel = "k3s.io/hostname"
+var (
+ InternalIPLabel = version.Program + ".io/internal-ip"
+ ExternalIPLabel = version.Program + ".io/external-ip"
+ HostnameLabel = version.Program + ".io/hostname"
)
func (k *k3s) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
@@ -45,7 +46,7 @@ func (k *k3s) InstanceType(ctx context.Context, name types.NodeName) (string, er
if err != nil {
return "", err
}
- return "k3s", nil
+ return version.Program, nil
}
func (k *k3s) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
diff --git a/pkg/cluster/bootstrap.go b/pkg/cluster/bootstrap.go
new file mode 100644
index 000000000000..848cf44e660b
--- /dev/null
+++ b/pkg/cluster/bootstrap.go
@@ -0,0 +1,110 @@
+package cluster
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/rancher/k3s/pkg/bootstrap"
+ "github.com/rancher/k3s/pkg/clientaccess"
+ "github.com/rancher/k3s/pkg/version"
+ "github.com/sirupsen/logrus"
+)
+
+func (c *Cluster) Bootstrap(ctx context.Context) error {
+ if err := c.assignManagedDriver(ctx); err != nil {
+ return err
+ }
+
+ runBootstrap, err := c.shouldBootstrapLoad()
+ if err != nil {
+ return err
+ }
+ c.shouldBootstrap = runBootstrap
+
+ if runBootstrap {
+ if err := c.bootstrap(ctx); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *Cluster) shouldBootstrapLoad() (bool, error) {
+ if c.managedDB != nil {
+ c.runtime.HTTPBootstrap = true
+ if c.config.JoinURL == "" {
+ return false, nil
+ }
+
+ token, err := clientaccess.NormalizeAndValidateTokenForUser(c.config.JoinURL, c.config.Token, "server")
+ if err != nil {
+ return false, err
+ }
+
+ info, err := clientaccess.ParseAndValidateToken(c.config.JoinURL, token)
+ if err != nil {
+ return false, err
+ }
+ c.clientAccessInfo = info
+ }
+
+ stamp := c.bootstrapStamp()
+ if _, err := os.Stat(stamp); err == nil {
+ logrus.Info("Cluster bootstrap already complete")
+ return false, nil
+ }
+
+ if c.managedDB != nil && c.config.Token == "" {
+ return false, fmt.Errorf("K3S_TOKEN is required to join a cluster")
+ }
+
+ return true, nil
+}
+
+func (c *Cluster) bootstrapped() error {
+ if err := os.MkdirAll(filepath.Dir(c.bootstrapStamp()), 0700); err != nil {
+ return err
+ }
+
+ if _, err := os.Stat(c.bootstrapStamp()); err == nil {
+ return nil
+ }
+
+ f, err := os.Create(c.bootstrapStamp())
+ if err != nil {
+ return err
+ }
+
+ return f.Close()
+}
+
+func (c *Cluster) httpBootstrap() error {
+ content, err := clientaccess.Get("/v1-"+version.Program+"/server-bootstrap", c.clientAccessInfo)
+ if err != nil {
+ return err
+ }
+
+ return bootstrap.Read(bytes.NewBuffer(content), &c.runtime.ControlRuntimeBootstrap)
+}
+
+func (c *Cluster) bootstrap(ctx context.Context) error {
+ c.joining = true
+
+ if c.runtime.HTTPBootstrap {
+ return c.httpBootstrap()
+ }
+
+ if err := c.storageBootstrap(ctx); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *Cluster) bootstrapStamp() string {
+ return filepath.Join(c.config.DataDir, "db/joined-"+keyHash(c.config.Token))
+}
diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index 3e9b49d008da..09e4398ef1fe 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -6,6 +6,7 @@ import (
"github.com/pkg/errors"
"github.com/rancher/k3s/pkg/clientaccess"
+ "github.com/rancher/k3s/pkg/cluster/managed"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/kine/pkg/client"
"github.com/rancher/kine/pkg/endpoint"
@@ -15,8 +16,8 @@ type Cluster struct {
clientAccessInfo *clientaccess.Info
config *config.Control
runtime *config.ControlRuntime
- db interface{}
- runJoin bool
+ managedDB managed.Driver
+ shouldBootstrap bool
storageStarted bool
etcdConfig endpoint.ETCDConfig
joining bool
@@ -24,34 +25,33 @@ type Cluster struct {
storageClient client.Client
}
-func (c *Cluster) Start(ctx context.Context) error {
- if err := c.startClusterAndHTTPS(ctx); err != nil {
- return errors.Wrap(err, "start cluster and https")
+func (c *Cluster) Start(ctx context.Context) (<-chan struct{}, error) {
+ if err := c.initClusterAndHTTPS(ctx); err != nil {
+ return nil, errors.Wrap(err, "start cluster and https")
}
- if c.runJoin {
- if err := c.postJoin(ctx); err != nil {
- return errors.Wrap(err, "post join")
- }
+ if err := c.start(ctx); err != nil {
+ return nil, errors.Wrap(err, "start cluster and https")
}
- if err := c.testClusterDB(ctx); err != nil {
- return err
+ ready, err := c.testClusterDB(ctx)
+ if err != nil {
+ return nil, err
}
if c.saveBootstrap {
if err := c.save(ctx); err != nil {
- return err
+ return nil, err
}
}
- if c.runJoin {
- if err := c.joined(); err != nil {
- return err
+ if c.shouldBootstrap {
+ if err := c.bootstrapped(); err != nil {
+ return nil, err
}
}
- return c.startStorage(ctx)
+ return ready, c.startStorage(ctx)
}
func (c *Cluster) startStorage(ctx context.Context) error {
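Start now hands back a readiness channel instead of blocking: prepare() in pkg/daemons/control stores it as runtime.ETCDReady, and the executor gates kube-apiserver startup on it. A standalone sketch of the pattern, with the datastore test faked by a timer:

    package main

    import (
        "fmt"
        "time"
    )

    // startDatastore mimics Cluster.Start: the returned channel is closed once
    // the (here simulated) managed datastore passes its connection test.
    func startDatastore() <-chan struct{} {
        ready := make(chan struct{})
        go func() {
            time.Sleep(100 * time.Millisecond) // stand-in for the managedDB.Test retry loop
            close(ready)
        }()
        return ready
    }

    func main() {
        ready := startDatastore()
        <-ready // callers such as executor.APIServer block here before serving
        fmt.Println("datastore ready, starting kube-apiserver")
    }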
diff --git a/pkg/cluster/dqlite.go b/pkg/cluster/dqlite.go
deleted file mode 100644
index 9019ea5a8263..000000000000
--- a/pkg/cluster/dqlite.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// +build dqlite
-
-package cluster
-
-import (
- "context"
- "crypto/tls"
- "encoding/json"
- "net"
- "net/http"
- "os"
- "path/filepath"
- "strings"
- "time"
-
- "github.com/canonical/go-dqlite/client"
- "github.com/rancher/dynamiclistener/factory"
- "github.com/rancher/k3s/pkg/clientaccess"
- "github.com/rancher/k3s/pkg/daemons/config"
- "github.com/rancher/k3s/pkg/dqlite"
- "github.com/rancher/kine/pkg/endpoint"
- v1 "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1"
- "github.com/sirupsen/logrus"
-)
-
-func (c *Cluster) testClusterDB(ctx context.Context) error {
- if !c.dqliteEnabled() {
- return nil
- }
-
- dqlite := c.db.(*dqlite.DQLite)
- for {
- if err := dqlite.Test(ctx); err != nil {
- logrus.Infof("Failed to test dqlite connection: %v", err)
- } else {
- return nil
- }
-
- select {
- case <-time.After(2 * time.Second):
- case <-ctx.Done():
- return ctx.Err()
- }
- }
-}
-
-func (c *Cluster) initClusterDB(ctx context.Context, l net.Listener, handler http.Handler) (net.Listener, http.Handler, error) {
- if !c.dqliteEnabled() {
- return l, handler, nil
- }
-
- dqlite := dqlite.New(c.config.DataDir, c.config.AdvertiseIP, c.config.AdvertisePort, func() v1.NodeController {
- if c.runtime.Core == nil {
- return nil
- }
- return c.runtime.Core.Core().V1().Node()
- })
-
- certs, err := toGetCerts(c.runtime)
- if err != nil {
- return nil, nil, err
- }
-
- handler, err = dqlite.Start(ctx, c.config.ClusterInit, c.config.ClusterReset, certs, handler)
- if err != nil {
- return nil, nil, err
- }
-
- if c.config.ClusterReset {
- if err := dqlite.Reset(ctx); err == nil {
- logrus.Info("Cluster reset successful, now rejoin members")
- os.Exit(0)
- } else {
- logrus.Fatalf("Cluster reset failed: %v", err)
- }
- }
-
- c.db = dqlite
- if !strings.HasPrefix(c.config.Datastore.Endpoint, "dqlite://") {
- c.config.Datastore = endpoint.Config{
- Endpoint: dqlite.StorageEndpoint,
- }
- }
-
- return l, handler, err
-}
-
-func (c *Cluster) dqliteEnabled() bool {
- stamp := filepath.Join(dqlite.GetDBDir(c.config.DataDir))
- if _, err := os.Stat(stamp); err == nil {
- return true
- }
-
- driver, _ := endpoint.ParseStorageEndpoint(c.config.Datastore.Endpoint)
- if driver == endpoint.DQLiteBackend {
- return true
- }
-
- return c.config.Datastore.Endpoint == "" && (c.config.ClusterInit || (c.config.Token != "" && c.config.JoinURL != ""))
-}
-
-func (c *Cluster) postJoin(ctx context.Context) error {
- if !c.dqliteEnabled() {
- return nil
- }
-
- resp, err := clientaccess.Get("/db/info", c.clientAccessInfo)
- if err != nil {
- return err
- }
-
- dqlite := c.db.(*dqlite.DQLite)
- var nodes []client.NodeInfo
-
- if err := json.Unmarshal(resp, &nodes); err != nil {
- return err
- }
-
- return dqlite.Join(ctx, nodes)
-}
-
-func toGetCerts(runtime *config.ControlRuntime) (*dqlite.Certs, error) {
- clientCA, _, err := factory.LoadCerts(runtime.ClientCA, runtime.ClientCAKey)
- if err != nil {
- return nil, err
- }
-
- ca, _, err := factory.LoadCerts(runtime.ServerCA, runtime.ServerCAKey)
- if err != nil {
- return nil, err
- }
-
- clientCert, err := tls.LoadX509KeyPair(runtime.ClientKubeAPICert, runtime.ClientKubeAPIKey)
- if err != nil {
- return nil, err
- }
-
- return &dqlite.Certs{
- ServerTrust: ca,
- ClientTrust: clientCA,
- ClientCert: clientCert,
- }, nil
-}
diff --git a/pkg/cluster/etcd.go b/pkg/cluster/etcd.go
new file mode 100644
index 000000000000..403eb4a99ac9
--- /dev/null
+++ b/pkg/cluster/etcd.go
@@ -0,0 +1,12 @@
+// +build !no_etcd
+
+package cluster
+
+import (
+ "github.com/rancher/k3s/pkg/cluster/managed"
+ "github.com/rancher/k3s/pkg/etcd"
+)
+
+func init() {
+ managed.RegisterDriver(&etcd.ETCD{})
+}
diff --git a/pkg/cluster/https.go b/pkg/cluster/https.go
index 8cc7035bca94..5dea465c278e 100644
--- a/pkg/cluster/https.go
+++ b/pkg/cluster/https.go
@@ -13,6 +13,7 @@ import (
"github.com/rancher/dynamiclistener/storage/kubernetes"
"github.com/rancher/dynamiclistener/storage/memory"
"github.com/rancher/k3s/pkg/daemons/config"
+ "github.com/rancher/k3s/pkg/version"
"github.com/rancher/wrangler-api/pkg/generated/controllers/core"
"github.com/sirupsen/logrus"
)
@@ -30,8 +31,8 @@ func (c *Cluster) newListener(ctx context.Context) (net.Listener, http.Handler,
storage := tlsStorage(ctx, c.config.DataDir, c.runtime)
return dynamiclistener.NewListener(tcp, storage, cert, key, dynamiclistener.Config{
- CN: "k3s",
- Organization: []string{"k3s"},
+ CN: version.Program,
+ Organization: []string{version.Program},
TLSConfig: tls.Config{
ClientAuth: tls.RequestClientCert,
MinVersion: c.config.TLSMinVersion,
@@ -41,7 +42,7 @@ func (c *Cluster) newListener(ctx context.Context) (net.Listener, http.Handler,
})
}
-func (c *Cluster) startClusterAndHTTPS(ctx context.Context) error {
+func (c *Cluster) initClusterAndHTTPS(ctx context.Context) error {
l, handler, err := c.newListener(ctx)
if err != nil {
return err
@@ -79,5 +80,5 @@ func tlsStorage(ctx context.Context, dataDir string, runtime *config.ControlRunt
cache := memory.NewBacked(fileStorage)
return kubernetes.New(ctx, func() *core.Factory {
return runtime.Core
- }, "kube-system", "k3s-serving", cache)
+ }, "kube-system", ""+version.Program+"-serving", cache)
}
diff --git a/pkg/cluster/join.go b/pkg/cluster/join.go
deleted file mode 100644
index ecb85e2b7abb..000000000000
--- a/pkg/cluster/join.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package cluster
-
-import (
- "bytes"
- "context"
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/rancher/k3s/pkg/bootstrap"
- "github.com/rancher/k3s/pkg/clientaccess"
- "github.com/sirupsen/logrus"
-)
-
-func (c *Cluster) Join(ctx context.Context) error {
- runJoin, err := c.shouldJoin()
- if err != nil {
- return err
- }
- c.runJoin = runJoin
-
- if runJoin {
- if err := c.join(ctx); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *Cluster) shouldJoin() (bool, error) {
- dqlite := c.dqliteEnabled()
- if dqlite {
- c.runtime.HTTPBootstrap = true
- if c.config.JoinURL == "" {
- return false, nil
- }
- }
-
- stamp := c.joinStamp()
- if _, err := os.Stat(stamp); err == nil {
- logrus.Info("Cluster bootstrap already complete")
- return false, nil
- }
-
- if dqlite && c.config.Token == "" {
- return false, fmt.Errorf("K3S_TOKEN is required to join a cluster")
- }
-
- return true, nil
-}
-
-func (c *Cluster) joined() error {
- if err := os.MkdirAll(filepath.Dir(c.joinStamp()), 0700); err != nil {
- return err
- }
-
- if _, err := os.Stat(c.joinStamp()); err == nil {
- return nil
- }
-
- f, err := os.Create(c.joinStamp())
- if err != nil {
- return err
- }
-
- return f.Close()
-}
-
-func (c *Cluster) httpJoin() error {
- token, err := clientaccess.NormalizeAndValidateTokenForUser(c.config.JoinURL, c.config.Token, "server")
- if err != nil {
- return err
- }
-
- info, err := clientaccess.ParseAndValidateToken(c.config.JoinURL, token)
- if err != nil {
- return err
- }
- c.clientAccessInfo = info
-
- content, err := clientaccess.Get("/v1-k3s/server-bootstrap", info)
- if err != nil {
- return err
- }
-
- return bootstrap.Read(bytes.NewBuffer(content), &c.runtime.ControlRuntimeBootstrap)
-}
-
-func (c *Cluster) join(ctx context.Context) error {
- c.joining = true
-
- if c.runtime.HTTPBootstrap {
- return c.httpJoin()
- }
-
- if err := c.storageJoin(ctx); err != nil {
- return err
- }
-
- return nil
-}
-
-func (c *Cluster) joinStamp() string {
- return filepath.Join(c.config.DataDir, "db/joined-"+keyHash(c.config.Token))
-}
diff --git a/pkg/cluster/managed.go b/pkg/cluster/managed.go
new file mode 100644
index 000000000000..a89fb85ad01f
--- /dev/null
+++ b/pkg/cluster/managed.go
@@ -0,0 +1,97 @@
+package cluster
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/rancher/k3s/pkg/cluster/managed"
+ "github.com/rancher/kine/pkg/endpoint"
+ "github.com/sirupsen/logrus"
+)
+
+func (c *Cluster) testClusterDB(ctx context.Context) (<-chan struct{}, error) {
+ result := make(chan struct{})
+ if c.managedDB == nil {
+ close(result)
+ return result, nil
+ }
+
+ go func() {
+ defer close(result)
+ for {
+ if err := c.managedDB.Test(ctx); err != nil {
+ logrus.Infof("Failed to test data store connection: %v", err)
+ } else {
+ logrus.Infof("Data store connection OK")
+ return
+ }
+
+ select {
+ case <-time.After(5 * time.Second):
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+
+ return result, nil
+}
+
+func (c *Cluster) start(ctx context.Context) error {
+ if c.managedDB == nil {
+ return nil
+ }
+
+ if c.config.ClusterReset {
+ return c.managedDB.Reset(ctx)
+ }
+
+ return c.managedDB.Start(ctx, c.clientAccessInfo)
+}
+
+func (c *Cluster) initClusterDB(ctx context.Context, l net.Listener, handler http.Handler) (net.Listener, http.Handler, error) {
+ if c.managedDB == nil {
+ return l, handler, nil
+ }
+
+ if !strings.HasPrefix(c.config.Datastore.Endpoint, c.managedDB.EndpointName()+"://") {
+ c.config.Datastore = endpoint.Config{
+ Endpoint: c.managedDB.EndpointName(),
+ }
+ }
+
+ return c.managedDB.Register(ctx, c.config, l, handler)
+}
+
+func (c *Cluster) assignManagedDriver(ctx context.Context) error {
+ for _, driver := range managed.Registered() {
+ if ok, err := driver.IsInitialized(ctx, c.config); err != nil {
+ return err
+ } else if ok {
+ c.managedDB = driver
+ return nil
+ }
+ }
+
+ endpointType := strings.SplitN(c.config.Datastore.Endpoint, ":", 2)[0]
+ for _, driver := range managed.Registered() {
+ if endpointType == driver.EndpointName() {
+ c.managedDB = driver
+ return nil
+ }
+ }
+
+ if c.config.Datastore.Endpoint == "" && (c.config.ClusterInit || (c.config.Token != "" && c.config.JoinURL != "")) {
+ for _, driver := range managed.Registered() {
+ if driver.EndpointName() == managed.Default() {
+ c.managedDB = driver
+ return nil
+ }
+ }
+ }
+
+ return nil
+}
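assignManagedDriver applies a three-step precedence: a driver that already has on-disk state wins, then a driver whose name matches the datastore endpoint scheme, then the default driver when --cluster-init (or a join token plus URL) is set; otherwise managedDB stays nil and the server falls back to kine. A standalone sketch of that selection logic:

    package main

    import (
        "fmt"
        "strings"
    )

    type driver struct {
        name        string
        initialized bool // stand-in for Driver.IsInitialized
    }

    // pick mirrors assignManagedDriver's precedence over a driver list
    func pick(drivers []driver, endpoint string, clusterInit bool) *driver {
        for i := range drivers {
            if drivers[i].initialized {
                return &drivers[i]
            }
        }
        scheme := strings.SplitN(endpoint, ":", 2)[0]
        for i := range drivers {
            if drivers[i].name == scheme {
                return &drivers[i]
            }
        }
        if endpoint == "" && clusterInit {
            return &drivers[0] // stand-in for managed.Default()
        }
        return nil // nil: fall back to kine (sqlite or an external SQL store)
    }

    func main() {
        ds := []driver{{name: "etcd"}}
        fmt.Println(pick(ds, "", true).name)        // etcd: fresh --cluster-init
        fmt.Println(pick(ds, "mysql://dsn", false)) // <nil>: external datastore
    }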
diff --git a/pkg/cluster/managed/drivers.go b/pkg/cluster/managed/drivers.go
new file mode 100644
index 000000000000..8ee78b2aff87
--- /dev/null
+++ b/pkg/cluster/managed/drivers.go
@@ -0,0 +1,39 @@
+package managed
+
+import (
+ "context"
+ "net"
+ "net/http"
+
+ "github.com/rancher/k3s/pkg/clientaccess"
+ "github.com/rancher/k3s/pkg/daemons/config"
+)
+
+var (
+ defaultDriver string
+ drivers []Driver
+)
+
+type Driver interface {
+ IsInitialized(ctx context.Context, config *config.Control) (bool, error)
+ Register(ctx context.Context, config *config.Control, l net.Listener, handler http.Handler) (net.Listener, http.Handler, error)
+ Reset(ctx context.Context) error
+ Start(ctx context.Context, clientAccess *clientaccess.Info) error
+ Test(ctx context.Context) error
+ EndpointName() string
+}
+
+func RegisterDriver(d Driver) {
+ drivers = append(drivers, d)
+}
+
+func Registered() []Driver {
+ return drivers
+}
+
+func Default() string {
+ if defaultDriver == "" && len(drivers) == 1 {
+ return drivers[0].EndpointName()
+ }
+ return defaultDriver
+}
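Any backend can plug in through this interface the same way pkg/cluster/etcd.go wires in etcd. A hypothetical no-op driver showing the registration pattern — the "memory" driver does not exist; it is purely illustrative:

    package memory

    import (
        "context"
        "net"
        "net/http"

        "github.com/rancher/k3s/pkg/clientaccess"
        "github.com/rancher/k3s/pkg/cluster/managed"
        "github.com/rancher/k3s/pkg/daemons/config"
    )

    type Memory struct{}

    func init() {
        // picked up by Cluster.assignManagedDriver via managed.Registered()
        managed.RegisterDriver(&Memory{})
    }

    func (m *Memory) IsInitialized(ctx context.Context, cfg *config.Control) (bool, error) {
        return false, nil // never claims pre-existing on-disk state
    }

    func (m *Memory) Register(ctx context.Context, cfg *config.Control, l net.Listener, h http.Handler) (net.Listener, http.Handler, error) {
        return l, h, nil
    }

    func (m *Memory) Reset(ctx context.Context) error                       { return nil }
    func (m *Memory) Start(ctx context.Context, _ *clientaccess.Info) error { return nil }
    func (m *Memory) Test(ctx context.Context) error                        { return nil }
    func (m *Memory) EndpointName() string                                  { return "memory" }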
diff --git a/pkg/cluster/nocluster.go b/pkg/cluster/nocluster.go
deleted file mode 100644
index 19f5728c2ab6..000000000000
--- a/pkg/cluster/nocluster.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build !dqlite
-
-package cluster
-
-import (
- "context"
- "net"
- "net/http"
-)
-
-func (c *Cluster) testClusterDB(ctx context.Context) error {
- return nil
-}
-
-func (c *Cluster) initClusterDB(ctx context.Context, l net.Listener, handler http.Handler) (net.Listener, http.Handler, error) {
- return l, handler, nil
-}
-
-func (c *Cluster) postJoin(ctx context.Context) error {
- return nil
-}
-
-func (c *Cluster) dqliteEnabled() bool {
- return false
-}
diff --git a/pkg/cluster/storage.go b/pkg/cluster/storage.go
index dc3d776de6e0..51e8aac9674b 100644
--- a/pkg/cluster/storage.go
+++ b/pkg/cluster/storage.go
@@ -22,7 +22,7 @@ func (c *Cluster) save(ctx context.Context) error {
return c.storageClient.Create(ctx, storageKey(c.config.Token), data)
}
-func (c *Cluster) storageJoin(ctx context.Context) error {
+func (c *Cluster) storageBootstrap(ctx context.Context) error {
if err := c.startStorage(ctx); err != nil {
return err
}
diff --git a/pkg/daemons/config/types.go b/pkg/daemons/config/types.go
index 521b8839f4da..bfa7700f78f3 100644
--- a/pkg/daemons/config/types.go
+++ b/pkg/daemons/config/types.go
@@ -1,6 +1,7 @@
package config
import (
+ "context"
"crypto/tls"
"fmt"
"net"
@@ -132,6 +133,10 @@ type Control struct {
}
type ControlRuntimeBootstrap struct {
+ ETCDServerCA string
+ ETCDServerCAKey string
+ ETCDPeerCA string
+ ETCDPeerCAKey string
ServerCA string
ServerCAKey string
ClientCA string
@@ -147,8 +152,10 @@ type ControlRuntimeBootstrap struct {
type ControlRuntime struct {
ControlRuntimeBootstrap
- HTTPBootstrap bool
- APIServerReady <-chan struct{}
+ HTTPBootstrap bool
+ APIServerReady <-chan struct{}
+ ETCDReady <-chan struct{}
+ ClusterControllerStart func(ctx context.Context) error
ClientKubeAPICert string
ClientKubeAPIKey string
@@ -186,6 +193,13 @@ type ControlRuntime struct {
ClientK3sControllerCert string
ClientK3sControllerKey string
+ ServerETCDCert string
+ ServerETCDKey string
+ PeerServerClientETCDCert string
+ PeerServerClientETCDKey string
+ ClientETCDCert string
+ ClientETCDKey string
+
Core *core.Factory
}
diff --git a/pkg/daemons/control/server.go b/pkg/daemons/control/server.go
index 6326a7db3d96..2f7620a16aea 100644
--- a/pkg/daemons/control/server.go
+++ b/pkg/daemons/control/server.go
@@ -19,6 +19,8 @@ import (
"text/template"
"time"
+ "k8s.io/apimachinery/pkg/util/sets"
+
"github.com/pkg/errors"
certutil "github.com/rancher/dynamiclistener/cert"
"github.com/rancher/k3s/pkg/clientaccess"
@@ -27,6 +29,7 @@ import (
"github.com/rancher/k3s/pkg/daemons/executor"
"github.com/rancher/k3s/pkg/passwd"
"github.com/rancher/k3s/pkg/token"
+ "github.com/rancher/k3s/pkg/version"
"github.com/rancher/wrangler-api/pkg/generated/controllers/rbac"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -186,7 +189,7 @@ func apiServer(ctx context.Context, cfg *config.Control, runtime *config.Control
argsMap["tls-cert-file"] = runtime.ServingKubeAPICert
argsMap["tls-private-key-file"] = runtime.ServingKubeAPIKey
argsMap["service-account-key-file"] = runtime.ServiceKey
- argsMap["service-account-issuer"] = "k3s"
+ argsMap["service-account-issuer"] = version.Program
argsMap["api-audiences"] = "unknown"
argsMap["basic-auth-file"] = runtime.PasswdFile
argsMap["kubelet-certificate-authority"] = runtime.ServerCA
@@ -208,7 +211,7 @@ func apiServer(ctx context.Context, cfg *config.Control, runtime *config.Control
args := config.GetArgsList(argsMap, cfg.ExtraAPIArgs)
logrus.Infof("Running kube-apiserver %s", config.ArgString(args))
- return executor.APIServer(ctx, args)
+ return executor.APIServer(ctx, runtime.ETCDReady, args)
}
func defaults(config *config.Control) {
@@ -290,8 +293,8 @@ func prepare(ctx context.Context, config *config.Control, runtime *config.Contro
runtime.ClientKubeAPIKey = filepath.Join(config.DataDir, "tls", "client-kube-apiserver.key")
runtime.ClientKubeProxyCert = filepath.Join(config.DataDir, "tls", "client-kube-proxy.crt")
runtime.ClientKubeProxyKey = filepath.Join(config.DataDir, "tls", "client-kube-proxy.key")
- runtime.ClientK3sControllerCert = filepath.Join(config.DataDir, "tls", "client-k3s-controller.crt")
- runtime.ClientK3sControllerKey = filepath.Join(config.DataDir, "tls", "client-k3s-controller.key")
+ runtime.ClientK3sControllerCert = filepath.Join(config.DataDir, "tls", "client-"+version.Program+"-controller.crt")
+ runtime.ClientK3sControllerKey = filepath.Join(config.DataDir, "tls", "client-"+version.Program+"-controller.key")
runtime.ServingKubeAPICert = filepath.Join(config.DataDir, "tls", "serving-kube-apiserver.crt")
runtime.ServingKubeAPIKey = filepath.Join(config.DataDir, "tls", "serving-kube-apiserver.key")
@@ -302,13 +305,24 @@ func prepare(ctx context.Context, config *config.Control, runtime *config.Contro
runtime.ClientAuthProxyCert = filepath.Join(config.DataDir, "tls", "client-auth-proxy.crt")
runtime.ClientAuthProxyKey = filepath.Join(config.DataDir, "tls", "client-auth-proxy.key")
+ runtime.ETCDServerCA = filepath.Join(config.DataDir, "tls", "etcd", "server-ca.crt")
+ runtime.ETCDServerCAKey = filepath.Join(config.DataDir, "tls", "etcd", "server-ca.key")
+ runtime.ETCDPeerCA = filepath.Join(config.DataDir, "tls", "etcd", "peer-ca.crt")
+ runtime.ETCDPeerCAKey = filepath.Join(config.DataDir, "tls", "etcd", "peer-ca.key")
+ runtime.ServerETCDCert = filepath.Join(config.DataDir, "tls", "etcd", "server-client.crt")
+ runtime.ServerETCDKey = filepath.Join(config.DataDir, "tls", "etcd", "server-client.key")
+ runtime.PeerServerClientETCDCert = filepath.Join(config.DataDir, "tls", "etcd", "peer-server-client.crt")
+ runtime.PeerServerClientETCDKey = filepath.Join(config.DataDir, "tls", "etcd", "peer-server-client.key")
+ runtime.ClientETCDCert = filepath.Join(config.DataDir, "tls", "etcd", "client.crt")
+ runtime.ClientETCDKey = filepath.Join(config.DataDir, "tls", "etcd", "client.key")
+
if config.EncryptSecrets {
runtime.EncryptionConfig = filepath.Join(config.DataDir, "cred", "encryption-config.json")
}
cluster := cluster.New(config)
- if err := cluster.Join(ctx); err != nil {
+ if err := cluster.Bootstrap(ctx); err != nil {
return err
}
@@ -336,7 +350,13 @@ func prepare(ctx context.Context, config *config.Control, runtime *config.Contro
return err
}
- return cluster.Start(ctx)
+ ready, err := cluster.Start(ctx)
+ if err != nil {
+ return err
+ }
+
+ runtime.ETCDReady = ready
+ return nil
}
func readTokens(runtime *config.ControlRuntime) error {
@@ -382,7 +402,7 @@ func migratePassword(p *passwd.Passwd) error {
server, _ := p.Pass("server")
node, _ := p.Pass("node")
if server == "" && node != "" {
- return p.EnsureUser("server", "k3s:server", node)
+ return p.EnsureUser("server", version.Program+":server", node)
}
return nil
}
@@ -433,11 +453,11 @@ func genUsers(config *config.Control, runtime *config.ControlRuntime) error {
nodePass := getNodePass(config, serverPass)
- if err := passwd.EnsureUser("node", "k3s:agent", nodePass); err != nil {
+ if err := passwd.EnsureUser("node", version.Program+":agent", nodePass); err != nil {
return err
}
- if err := passwd.EnsureUser("server", "k3s:server", serverPass); err != nil {
+ if err := passwd.EnsureUser("server", version.Program+":server", serverPass); err != nil {
return err
}
@@ -454,6 +474,9 @@ func genCerts(config *config.Control, runtime *config.ControlRuntime) error {
if err := genRequestHeaderCerts(config, runtime); err != nil {
return err
}
+ if err := genETCDCerts(config, runtime); err != nil {
+ return err
+ }
return nil
}
@@ -466,7 +489,7 @@ func getSigningCertFactory(regen bool, altNames *certutil.AltNames, extKeyUsage
}
func genClientCerts(config *config.Control, runtime *config.ControlRuntime) error {
- regen, err := createSigningCertKey("k3s-client", runtime.ClientCA, runtime.ClientCAKey)
+ regen, err := createSigningCertKey(version.Program+"-client", runtime.ClientCA, runtime.ClientCAKey)
if err != nil {
return err
}
@@ -519,6 +542,7 @@ func genClientCerts(config *config.Control, runtime *config.ControlRuntime) erro
if _, err = factory("system:kube-proxy", nil, runtime.ClientKubeProxyCert, runtime.ClientKubeProxyKey); err != nil {
return err
}
+ // this must be hardcoded to k3s-controller because it is hardcoded in rolebindings.yaml
if _, err = factory("system:k3s-controller", nil, runtime.ClientK3sControllerCert, runtime.ClientK3sControllerKey); err != nil {
return err
}
@@ -554,7 +578,18 @@ func createServerSigningCertKey(config *config.Control, runtime *config.ControlR
}
return true, nil
}
- return createSigningCertKey("k3s-server", runtime.ServerCA, runtime.ServerCAKey)
+ return createSigningCertKey(version.Program+"-server", runtime.ServerCA, runtime.ServerCAKey)
+}
+
+func addSANs(altNames *certutil.AltNames, sans []string) {
+ for _, san := range sans {
+ ip := net.ParseIP(san)
+ if ip == nil {
+ altNames.DNSNames = append(altNames.DNSNames, san)
+ } else {
+ altNames.IPs = append(altNames.IPs, ip)
+ }
+ }
}
func genServerCerts(config *config.Control, runtime *config.ControlRuntime) error {
@@ -568,11 +603,15 @@ func genServerCerts(config *config.Control, runtime *config.ControlRuntime) erro
return err
}
+ altNames := &certutil.AltNames{
+ DNSNames: []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes", "localhost"},
+ IPs: []net.IP{apiServerServiceIP},
+ }
+
+ addSANs(altNames, config.SANs)
+
if _, err := createClientCertKey(regen, "kube-apiserver", nil,
- &certutil.AltNames{
- DNSNames: []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes", "localhost"},
- IPs: []net.IP{apiServerServiceIP, localhostIP},
- }, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+ altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
runtime.ServerCA, runtime.ServerCAKey,
runtime.ServingKubeAPICert, runtime.ServingKubeAPIKey); err != nil {
return err
@@ -585,8 +624,48 @@ func genServerCerts(config *config.Control, runtime *config.ControlRuntime) erro
return nil
}
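+// genETCDCerts creates the etcd server and peer signing CAs, then issues the server, client, and peer certificates used for etcd mutual TLS.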
+func genETCDCerts(config *config.Control, runtime *config.ControlRuntime) error {
+ regen, err := createSigningCertKey("etcd-server", runtime.ETCDServerCA, runtime.ETCDServerCAKey)
+ if err != nil {
+ return err
+ }
+
+ altNames := &certutil.AltNames{
+ DNSNames: []string{"localhost"},
+ }
+ addSANs(altNames, config.SANs)
+
+ if _, err := createClientCertKey(regen, "etcd-server", nil,
+ altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
+ runtime.ETCDServerCA, runtime.ETCDServerCAKey,
+ runtime.ServerETCDCert, runtime.ServerETCDKey); err != nil {
+ return err
+ }
+
+ if _, err := createClientCertKey(regen, "etcd-client", nil,
+ nil, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+ runtime.ETCDServerCA, runtime.ETCDServerCAKey,
+ runtime.ClientETCDCert, runtime.ClientETCDKey); err != nil {
+ return err
+ }
+
+ regen, err = createSigningCertKey("etcd-peer", runtime.ETCDPeerCA, runtime.ETCDPeerCAKey)
+ if err != nil {
+ return err
+ }
+
+ if _, err := createClientCertKey(regen, "etcd-peer", nil,
+ altNames, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
+ runtime.ETCDPeerCA, runtime.ETCDPeerCAKey,
+ runtime.PeerServerClientETCDCert, runtime.PeerServerClientETCDKey); err != nil {
+ return err
+ }
+
+ return nil
+}
+
func genRequestHeaderCerts(config *config.Control, runtime *config.ControlRuntime) error {
- regen, err := createSigningCertKey("k3s-request-header", runtime.RequestHeaderCA, runtime.RequestHeaderCAKey)
+ regen, err := createSigningCertKey(version.Program+"-request-header", runtime.RequestHeaderCA, runtime.RequestHeaderCAKey)
if err != nil {
return err
}
@@ -615,6 +694,10 @@ func createClientCertKey(regen bool, commonName string, organization []string, a
regen = expired(certFile, pool)
}
+ if !regen {
+ regen = sansChanged(certFile, altNames)
+ }
+
if !regen {
if exists(certFile, keyFile) {
return false, nil
@@ -755,6 +838,43 @@ func setupStorageBackend(argsMap map[string]string, cfg *config.Control) {
}
}
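+// sansChanged reports whether the certificate on disk is missing any of the requested SANs, forcing regeneration.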
+func sansChanged(certFile string, sans *certutil.AltNames) bool {
+ if sans == nil {
+ return false
+ }
+
+ certBytes, err := ioutil.ReadFile(certFile)
+ if err != nil {
+ return false
+ }
+
+ certificates, err := certutil.ParseCertsPEM(certBytes)
+ if err != nil {
+ return false
+ }
+
+ if len(certificates) == 0 {
+ return false
+ }
+
+ if !sets.NewString(certificates[0].DNSNames...).HasAll(sans.DNSNames...) {
+ return true
+ }
+
+ ips := sets.NewString()
+ for _, ip := range certificates[0].IPAddresses {
+ ips.Insert(ip.String())
+ }
+
+ for _, ip := range sans.IPs {
+ if !ips.Has(ip.String()) {
+ return true
+ }
+ }
+
+ return false
+}
+
func expired(certFile string, pool *x509.CertPool) bool {
certBytes, err := ioutil.ReadFile(certFile)
if err != nil {
@@ -783,7 +903,7 @@ func cloudControllerManager(ctx context.Context, cfg *config.Control, runtime *c
"cluster-cidr": cfg.ClusterIPRange.String(),
"bind-address": localhostIP.String(),
"secure-port": "0",
- "cloud-provider": "k3s",
+ "cloud-provider": version.Program,
"allow-untagged-cloud": "true",
"node-status-update-frequency": "1m",
}
@@ -845,6 +965,19 @@ func waitForAPIServerInBackground(ctx context.Context, runtime *config.ControlRu
go func() {
defer close(done)
+
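+ // Hold off polling the apiserver until etcd signals readiness, logging every 30 seconds while waiting.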
+ etcdLoop:
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-runtime.ETCDReady:
+ break etcdLoop
+ case <-time.After(30 * time.Second):
+ logrus.Infof("Waiting for etcd server to become available")
+ }
+ }
+
logrus.Infof("Waiting for API server to become available")
for {
select {
diff --git a/pkg/daemons/executor/embed.go b/pkg/daemons/executor/embed.go
index 27bc97848460..1560de8c7e05 100644
--- a/pkg/daemons/executor/embed.go
+++ b/pkg/daemons/executor/embed.go
@@ -45,7 +45,8 @@ func (Embedded) KubeProxy(args []string) error {
return nil
}
-func (Embedded) APIServer(ctx context.Context, args []string) (authenticator.Request, http.Handler, error) {
+func (Embedded) APIServer(ctx context.Context, etcdReady <-chan struct{}, args []string) (authenticator.Request, http.Handler, error) {
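+ // The apiserver cannot come up before its datastore; block until etcd signals readiness.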
+ <-etcdReady
command := app.NewAPIServerCommand(ctx.Done())
command.SetArgs(args)
diff --git a/pkg/daemons/executor/etcd.go b/pkg/daemons/executor/etcd.go
new file mode 100644
index 000000000000..3d31a08f6033
--- /dev/null
+++ b/pkg/daemons/executor/etcd.go
@@ -0,0 +1,33 @@
+// +build !no_embedded_executor
+
+package executor
+
+import (
+ "github.com/sirupsen/logrus"
+ "go.etcd.io/etcd/embed"
+)
+
+func (e Embedded) CurrentETCDOptions() (InitialOptions, error) {
+ return InitialOptions{}, nil
+}
+
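+// ETCD renders the config to disk, starts an embedded etcd from it, and terminates the process if etcd later reports a fatal error.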
+func (e Embedded) ETCD(args ETCDConfig) error {
+ configFile, err := args.ToConfigFile()
+ if err != nil {
+ return err
+ }
+ cfg, err := embed.ConfigFromFile(configFile)
+ if err != nil {
+ return err
+ }
+ etcd, err := embed.StartEtcd(cfg)
+ if err != nil {
+ return err
+ }
+
+ go func() {
+ err := <-etcd.Err()
+ logrus.Fatalf("etcd exited: %v", err)
+ }()
+ return nil
+}
diff --git a/pkg/daemons/executor/executor.go b/pkg/daemons/executor/executor.go
index fd19adb43d3b..6bf2f6e56de3 100644
--- a/pkg/daemons/executor/executor.go
+++ b/pkg/daemons/executor/executor.go
@@ -2,22 +2,78 @@ package executor
import (
"context"
+ "io/ioutil"
"net/http"
+ "os"
+ "path/filepath"
+
+ "sigs.k8s.io/yaml"
"k8s.io/apiserver/pkg/authentication/authenticator"
)
+var (
+ executor Executor
+)
+
type Executor interface {
Kubelet(args []string) error
KubeProxy(args []string) error
- APIServer(ctx context.Context, args []string) (authenticator.Request, http.Handler, error)
+ APIServer(ctx context.Context, etcdReady <-chan struct{}, args []string) (authenticator.Request, http.Handler, error)
Scheduler(apiReady <-chan struct{}, args []string) error
ControllerManager(apiReady <-chan struct{}, args []string) error
+ CurrentETCDOptions() (InitialOptions, error)
+ ETCD(args ETCDConfig) error
}
-var (
- executor Executor
-)
+type ETCDConfig struct {
+ InitialOptions `json:",inline"`
+ Name string `json:"name,omitempty"`
+ ListenClientURLs string `json:"listen-client-urls,omitempty"`
+ ListenMetricsURLs string `json:"listen-metrics-urls,omitempty"`
+ ListenPeerURLs string `json:"listen-peer-urls,omitempty"`
+ AdvertiseClientURLs string `json:"advertise-client-urls,omitempty"`
+ DataDir string `json:"data-dir,omitempty"`
+ SnapshotCount int `json:"snapshot-count,omitempty"`
+ ServerTrust ServerTrust `json:"client-transport-security"`
+ PeerTrust PeerTrust `json:"peer-transport-security"`
+ ForceNewCluster bool `json:"force-new-cluster,omitempty"`
+ HeartbeatInterval int `json:"heartbeat-interval"`
+ ElectionTimeout int `json:"election-timeout"`
+}
+
+type ServerTrust struct {
+ CertFile string `json:"cert-file"`
+ KeyFile string `json:"key-file"`
+ ClientCertAuth bool `json:"client-cert-auth"`
+ TrustedCAFile string `json:"trusted-ca-file"`
+}
+
+type PeerTrust struct {
+ CertFile string `json:"cert-file"`
+ KeyFile string `json:"key-file"`
+ ClientCertAuth bool `json:"client-cert-auth"`
+ TrustedCAFile string `json:"trusted-ca-file"`
+}
+
+type InitialOptions struct {
+ AdvertisePeerURL string `json:"initial-advertise-peer-urls,omitempty"`
+ Cluster string `json:"initial-cluster,omitempty"`
+ State string `json:"initial-cluster-state,omitempty"`
+}
+
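+// ToConfigFile marshals the config as YAML to <data-dir>/config and returns the path, to be loaded with embed.ConfigFromFile.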
+func (e ETCDConfig) ToConfigFile() (string, error) {
+ confFile := filepath.Join(e.DataDir, "config")
+ bytes, err := yaml.Marshal(&e)
+ if err != nil {
+ return "", err
+ }
+
+ if err := os.MkdirAll(e.DataDir, 0700); err != nil {
+ return "", err
+ }
+ return confFile, ioutil.WriteFile(confFile, bytes, 0600)
+}
func Set(driver Executor) {
executor = driver
@@ -31,8 +87,8 @@ func KubeProxy(args []string) error {
return executor.KubeProxy(args)
}
-func APIServer(ctx context.Context, args []string) (authenticator.Request, http.Handler, error) {
- return executor.APIServer(ctx, args)
+func APIServer(ctx context.Context, etcdReady <-chan struct{}, args []string) (authenticator.Request, http.Handler, error) {
+ return executor.APIServer(ctx, etcdReady, args)
}
func Scheduler(apiReady <-chan struct{}, args []string) error {
@@ -42,3 +98,11 @@ func Scheduler(apiReady <-chan struct{}, args []string) error {
func ControllerManager(apiReady <-chan struct{}, args []string) error {
return executor.ControllerManager(apiReady, args)
}
+
+func CurrentETCDOptions() (InitialOptions, error) {
+ return executor.CurrentETCDOptions()
+}
+
+func ETCD(args ETCDConfig) error {
+ return executor.ETCD(args)
+}
diff --git a/pkg/datadir/datadir.go b/pkg/datadir/datadir.go
index 298998d8727a..ce02cb7bef20 100644
--- a/pkg/datadir/datadir.go
+++ b/pkg/datadir/datadir.go
@@ -5,14 +5,15 @@ import (
"path/filepath"
"github.com/pkg/errors"
+ "github.com/rancher/k3s/pkg/version"
"github.com/rancher/wrangler/pkg/resolvehome"
)
-const (
- DefaultDataDir = "/var/lib/rancher/k3s"
- DefaultHomeDataDir = "${HOME}/.rancher/k3s"
- HomeConfig = "${HOME}/.kube/k3s.yaml"
- GlobalConfig = "/etc/rancher/k3s/k3s.yaml"
+var (
+ DefaultDataDir = "/var/lib/rancher/" + version.Program
+ DefaultHomeDataDir = "${HOME}/.rancher/" + version.Program
+ HomeConfig = "${HOME}/.kube/" + version.Program + ".yaml"
+ GlobalConfig = "/etc/rancher/" + version.Program + "/" + version.Program + ".yaml"
)
func Resolve(dataDir string) (string, error) {
diff --git a/pkg/dqlite/controller/client/controller.go b/pkg/dqlite/controller/client/controller.go
deleted file mode 100644
index ece9832e6605..000000000000
--- a/pkg/dqlite/controller/client/controller.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package client
-
-import (
- "context"
- "fmt"
- "strconv"
-
- "github.com/canonical/go-dqlite/client"
- "github.com/canonical/go-dqlite/driver"
- controllerv1 "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1"
- "github.com/sirupsen/logrus"
- v1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/labels"
-)
-
-const (
- allKey = "_all_"
- nodeID = "cluster.k3s.cattle.io/node-id"
- nodeAddress = "cluster.k3s.cattle.io/node-address"
- master = "node-role.kubernetes.io/master"
-)
-
-func Register(ctx context.Context, nodeName string, nodeInfo client.NodeInfo,
- nodeStore client.NodeStore, nodes controllerv1.NodeController, opts []client.Option) {
- h := &handler{
- nodeStore: nodeStore,
- nodeController: nodes,
- nodeName: nodeName,
- id: strconv.FormatUint(nodeInfo.ID, 10),
- address: nodeInfo.Address,
- ctx: ctx,
- opts: opts,
- }
- nodes.OnChange(ctx, "dqlite-client", h.sync)
- nodes.OnRemove(ctx, "dqlite-client", h.onRemove)
-}
-
-type handler struct {
- nodeStore client.NodeStore
- nodeController controllerv1.NodeController
- nodeName string
- id string
- address string
- ctx context.Context
- opts []client.Option
-}
-
-func (h *handler) sync(key string, node *v1.Node) (*v1.Node, error) {
- if key == allKey {
- return nil, h.updateNodeStore()
- }
-
- if node == nil {
- return nil, nil
- }
-
- if key == h.nodeName {
- return h.handleSelf(node)
- }
-
- if node.Labels[master] == "true" {
- h.nodeController.Enqueue(allKey)
- }
-
- return node, nil
-}
-
-func (h *handler) ensureExists(address string) error {
- c, err := client.FindLeader(h.ctx, h.nodeStore, h.opts...)
- if err == driver.ErrNoAvailableLeader {
- logrus.Fatalf("no dqlite leader found: %v", err)
- } else if err != nil {
- return err
- }
- defer c.Close()
-
- members, err := c.Cluster(h.ctx)
- if err != nil {
- return err
- }
-
- for _, member := range members {
- if member.Address == address {
- return nil
- }
- }
-
- logrus.Fatalf("Address %s is not member of the cluster", address)
- return nil
-}
-
-func (h *handler) handleSelf(node *v1.Node) (*v1.Node, error) {
- if node.Annotations[nodeID] == h.id && node.Annotations[nodeAddress] == h.address {
- return node, h.ensureExists(h.address)
- }
-
- node = node.DeepCopy()
- if node.Annotations == nil {
- node.Annotations = map[string]string{}
- }
- node.Annotations[nodeID] = h.id
- node.Annotations[nodeAddress] = h.address
-
- return h.nodeController.Update(node)
-}
-
-func (h *handler) onRemove(key string, node *v1.Node) (*v1.Node, error) {
- address := node.Annotations[nodeAddress]
- if address == "" {
- return node, nil
- }
- return node, h.delete(address)
-}
-
-func (h *handler) delete(address string) error {
- c, err := client.FindLeader(h.ctx, h.nodeStore, h.opts...)
- if err != nil {
- return err
- }
- defer c.Close()
-
- members, err := c.Cluster(h.ctx)
- if err != nil {
- return err
- }
-
- for _, member := range members {
- if member.Address == address {
- logrus.Infof("Removing %s %d from dqlite", member.Address, member.ID)
- return c.Remove(h.ctx, member.ID)
- }
- }
-
- return nil
-}
-
-func (h *handler) updateNodeStore() error {
- nodes, err := h.nodeController.Cache().List(labels.SelectorFromSet(labels.Set{
- master: "true",
- }))
- if err != nil {
- return err
- }
-
- var (
- nodeInfos []client.NodeInfo
- seen = map[string]bool{}
- )
-
- for _, node := range nodes {
- address, ok := node.Annotations[nodeAddress]
- if !ok {
- continue
- }
-
- nodeIDStr, ok := node.Annotations[nodeID]
- if !ok {
- continue
- }
-
- id, err := strconv.ParseUint(nodeIDStr, 10, 64)
- if err != nil {
- logrus.Errorf("invalid %s=%s, must be a number: %v", nodeID, nodeIDStr, err)
- continue
- }
-
- if !seen[address] {
- nodeInfos = append(nodeInfos, client.NodeInfo{
- ID: id,
- Address: address,
- })
- seen[address] = true
- }
- }
-
- if len(nodeInfos) == 0 {
- return fmt.Errorf("not setting dqlient NodeStore len to 0")
- }
-
- return h.nodeStore.Set(h.ctx, nodeInfos)
-}
diff --git a/pkg/dqlite/dialer/dial.go b/pkg/dqlite/dialer/dial.go
deleted file mode 100644
index 87b801a7dc86..000000000000
--- a/pkg/dqlite/dialer/dial.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package dialer
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "net"
-
- "github.com/canonical/go-dqlite/client"
- "github.com/rancher/k3s/pkg/dqlite/pipe"
-)
-
-func NewHTTPDialer(advertiseAddress, bindAddress string, tls *tls.Config) (client.DialFunc, error) {
- d := &dialer{
- advertiseAddress: advertiseAddress,
- bindAddress: bindAddress,
- tls: tls,
- }
-
- return d.dial, nil
-}
-
-type dialer struct {
- advertiseAddress string
- bindAddress string
- tls *tls.Config
-}
-
-func (d *dialer) dial(ctx context.Context, address string) (net.Conn, error) {
- if address == d.advertiseAddress {
- return net.Dial("unix", d.bindAddress)
- }
-
- url := fmt.Sprintf("https://%s/db/connect", address)
- return pipe.ToHTTP(ctx, url, d.tls)
-}
diff --git a/pkg/dqlite/join.go b/pkg/dqlite/join.go
deleted file mode 100644
index d6648d285014..000000000000
--- a/pkg/dqlite/join.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package dqlite
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "time"
-
- "github.com/canonical/go-dqlite/client"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
-)
-
-func (d *DQLite) Test(ctx context.Context) error {
- var ips []string
- peers, err := d.NodeStore.Get(ctx)
- if err != nil {
- return err
- }
-
- for _, peer := range peers {
- ips = append(ips, peer.Address)
- }
-
- logrus.Infof("Testing connection to peers %v", ips)
- if err := d.Join(ctx, nil); err != nil {
- return err
- }
- logrus.Infof("Connection OK to peers %v", ips)
- return nil
-}
-
-func nodeIDsEqual(testID, currentID uint64) bool {
- // this is a test for a bug in v1.0.0. In future versions we don't
- // generate node ID higher than 1<<20 so this doesn't matter. But
- // basically just ignore the first 32 bits.
- return uint32(testID) == uint32(currentID)
-}
-
-func (d *DQLite) Join(ctx context.Context, nodes []client.NodeInfo) error {
- if len(nodes) > 0 {
- if err := d.NodeStore.Set(ctx, nodes); err != nil {
- return err
- }
- }
-
- client, err := client.FindLeader(ctx, d.NodeStore, d.clientOpts...)
- if err != nil {
- return err
- }
- defer client.Close()
-
- current, err := client.Cluster(ctx)
- if err != nil {
- return err
- }
-
- nodeID, err := getClusterID(false, d.DataDir)
- if err != nil {
- return errors.Wrap(err, "get cluster ID")
- }
- for _, testNode := range current {
- if testNode.Address == d.NodeInfo.Address {
- if !nodeIDsEqual(testNode.ID, nodeID) {
- if err := d.node.Close(); err != nil {
- return errors.Wrap(err, "node close for id reset")
- }
- if err := writeClusterID(testNode.ID, d.DataDir); err != nil {
- return errors.Wrap(err, "restart node to reset ID")
- }
- logrus.Fatalf("resetting node ID from %d to %d, please restart", nodeID, testNode.ID)
- }
- return nil
- } else if nodeIDsEqual(testNode.ID, nodeID) {
- deleteClusterID(d.DataDir)
- logrus.Fatalf("node ID %d is in use, please restart", nodeID)
- }
- }
-
- if found, err := cleanDir(d.DataDir, true); err != nil {
- return err
- } else if found {
- if err := d.node.Close(); err != nil {
- return errors.Wrap(err, "node close for cleaning")
- }
- _, _ = cleanDir(d.DataDir, false)
- return fmt.Errorf("cleaned DB directory, now restart and join")
- }
-
- logrus.Infof("Joining dqlite cluster as address=%s, id=%d", d.NodeInfo.Address, d.NodeInfo.ID)
- return client.Add(ctx, d.NodeInfo)
-}
-
-func cleanDir(dataDir string, check bool) (bool, error) {
- dbDir := GetDBDir(dataDir)
- backupDir := filepath.Join(dbDir, fmt.Sprintf(".backup-%d", time.Now().Unix()))
- files, err := ioutil.ReadDir(dbDir)
- if err != nil {
- return false, errors.Wrap(err, "cleaning dqlite DB dir")
- }
-
- for _, file := range files {
- if file.IsDir() || strings.HasPrefix(file.Name(), ".") || ignoreFile[file.Name()] {
- continue
- }
- if check {
- return true, nil
- }
- if err := os.MkdirAll(backupDir, 0700); err != nil {
- return false, errors.Wrapf(err, "creating backup dir %s", backupDir)
- }
- oldName := filepath.Join(dbDir, file.Name())
- newName := filepath.Join(backupDir, file.Name())
- logrus.Infof("Backing up %s => %s", oldName, newName)
- if err := os.Rename(oldName, newName); err != nil {
- return false, errors.Wrapf(err, "backup %s", oldName)
- }
- }
-
- return false, nil
-}
diff --git a/pkg/dqlite/log.go b/pkg/dqlite/log.go
deleted file mode 100644
index bbd07c801798..000000000000
--- a/pkg/dqlite/log.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package dqlite
-
-import (
- "strings"
-
- "github.com/canonical/go-dqlite/client"
- "github.com/sirupsen/logrus"
-)
-
-func log() client.LogFunc {
- return func(level client.LogLevel, s string, i ...interface{}) {
- switch level {
- case client.LogDebug:
- logrus.Debugf(s, i...)
- case client.LogError:
- logrus.Errorf(s, i...)
- case client.LogInfo:
- if strings.HasPrefix(s, "connected") {
- logrus.Debugf(s, i...)
- } else {
- logrus.Infof(s, i...)
- }
- case client.LogWarn:
- logrus.Warnf(s, i...)
- }
- }
-}
diff --git a/pkg/dqlite/pipe/http.go b/pkg/dqlite/pipe/http.go
deleted file mode 100644
index 036d4c69a2a2..000000000000
--- a/pkg/dqlite/pipe/http.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package pipe
-
-import (
- "bufio"
- "context"
- "crypto/tls"
- "fmt"
- "net"
- "net/http"
-
- "github.com/pkg/errors"
-)
-
-func ToHTTP(ctx context.Context, url string, tlsConfig *tls.Config) (net.Conn, error) {
- request, err := http.NewRequest(http.MethodPost, url, nil)
- if err != nil {
- return nil, err
- }
-
- request = request.WithContext(ctx)
- netDial := &net.Dialer{}
-
- if deadline, ok := ctx.Deadline(); ok {
- netDial.Deadline = deadline
- }
-
- conn, err := tls.DialWithDialer(netDial, "tcp", request.URL.Host, tlsConfig)
- if err != nil {
- return nil, errors.Wrap(err, "tls dial")
- }
-
- err = request.Write(conn)
- if err != nil {
- return nil, errors.Wrap(err, "request write")
- }
-
- response, err := http.ReadResponse(bufio.NewReader(conn), request)
- if err != nil {
- return nil, errors.Wrap(err, "read request")
- }
- if response.StatusCode != http.StatusSwitchingProtocols {
- return nil, fmt.Errorf("expected 101 response, got: %d", response.StatusCode)
- }
-
- listener, err := net.Listen("unix", "")
- if err != nil {
- return nil, errors.Wrap(err, "Failed to create unix listener")
- }
- defer listener.Close()
-
- if err := Unix(conn, listener.Addr().String()); err != nil {
- return nil, err
- }
-
- return listener.Accept()
-}
diff --git a/pkg/dqlite/pipe/pipe.go b/pkg/dqlite/pipe/pipe.go
deleted file mode 100644
index fc000313182b..000000000000
--- a/pkg/dqlite/pipe/pipe.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package pipe
-
-import (
- "io"
- "net"
-
- "github.com/lxc/lxd/shared/eagain"
- "github.com/sirupsen/logrus"
-)
-
-func UnixPiper(srcs <-chan net.Conn, bindAddress string) {
- for src := range srcs {
- go Unix(src, bindAddress)
- }
-}
-
-func Unix(src net.Conn, target string) error {
- dst, err := net.Dial("unix", target)
- if err != nil {
- src.Close()
- return err
- }
-
- Connect(src, dst)
- return nil
-}
-
-func Connect(src net.Conn, dst net.Conn) {
- go func() {
- _, err := io.Copy(eagain.Writer{Writer: dst}, eagain.Reader{Reader: src})
- if err != nil && err != io.EOF {
- logrus.Debugf("copy pipe src->dst closed: %v", err)
- }
- src.Close()
- dst.Close()
- }()
-
- go func() {
- _, err := io.Copy(eagain.Writer{Writer: src}, eagain.Reader{Reader: dst})
- if err != nil {
- logrus.Debugf("copy pipe dst->src closed: %v", err)
- }
- src.Close()
- dst.Close()
- }()
-}
diff --git a/pkg/dqlite/proxy.go b/pkg/dqlite/proxy.go
deleted file mode 100644
index 0387ad023e0a..000000000000
--- a/pkg/dqlite/proxy.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package dqlite
-
-import (
- "context"
- "net"
- "net/http"
-
- "github.com/pkg/errors"
- "github.com/rancher/k3s/pkg/dqlite/pipe"
-)
-
-var (
- upgradeResponse = []byte("HTTP/1.1 101 Switching Protocols\r\nUpgrade: dqlite\r\n\r\n")
-)
-
-type proxy struct {
- conns chan net.Conn
-}
-
-func newProxy(ctx context.Context, bindAddress string) http.Handler {
- p := &proxy{
- conns: make(chan net.Conn, 100),
- }
- go func() {
- <-ctx.Done()
- close(p.conns)
- }()
- go pipe.UnixPiper(p.conns, bindAddress)
-
- return p
-}
-
-func (h *proxy) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
- hijacker, ok := rw.(http.Hijacker)
- if !ok {
- http.Error(rw, "failed to hijack", http.StatusInternalServerError)
- return
- }
-
- conn, _, err := hijacker.Hijack()
- if err != nil {
- err := errors.Wrap(err, "Hijack connection")
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- return
- }
-
- if n, err := conn.Write(upgradeResponse); err != nil || n != len(upgradeResponse) {
- conn.Close()
- return
- }
-
- h.conns <- conn
-}
diff --git a/pkg/dqlite/reset.go b/pkg/dqlite/reset.go
deleted file mode 100644
index d9a379897f9c..000000000000
--- a/pkg/dqlite/reset.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package dqlite
-
-import (
- "context"
-
- "github.com/canonical/go-dqlite/client"
- "github.com/sirupsen/logrus"
-)
-
-func (d *DQLite) Reset(ctx context.Context) error {
- logrus.Infof("Resetting cluster to single master")
- return d.node.Recover([]client.NodeInfo{
- d.NodeInfo,
- })
-}
diff --git a/pkg/dqlite/router.go b/pkg/dqlite/router.go
deleted file mode 100644
index a88ce07e4a52..000000000000
--- a/pkg/dqlite/router.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package dqlite
-
-import (
- "context"
- "crypto/x509"
- "encoding/json"
- "net/http"
-
- "github.com/canonical/go-dqlite"
- "github.com/canonical/go-dqlite/client"
- "github.com/gorilla/mux"
-)
-
-func router(ctx context.Context, next http.Handler, nodeInfo dqlite.NodeInfo, clientCA *x509.Certificate, clientCN string, bindAddress string) http.Handler {
- mux := mux.NewRouter()
- mux.Handle("/db/connect", newChecker(newProxy(ctx, bindAddress), clientCA, clientCN))
- mux.Handle("/db/info", infoHandler(ctx, nodeInfo, bindAddress))
- mux.NotFoundHandler = next
- return mux
-}
-
-func infoHandler(ctx context.Context, nodeInfo dqlite.NodeInfo, bindAddress string) http.Handler {
- return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
- client, err := client.New(ctx, bindAddress, client.WithLogFunc(log()))
- if err != nil {
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- return
- }
- defer client.Close()
-
- info, err := client.Cluster(ctx)
- if err != nil {
- http.Error(rw, err.Error(), http.StatusInternalServerError)
- return
- }
-
- rw.Header().Set("Content-Type", "application/json")
- json.NewEncoder(rw).Encode(info)
- })
-}
-
-type checker struct {
- next http.Handler
- verify x509.VerifyOptions
- cn string
-}
-
-func newChecker(next http.Handler, ca *x509.Certificate, cn string) http.Handler {
- pool := x509.NewCertPool()
- pool.AddCert(ca)
- return &checker{
- next: next,
- verify: x509.VerifyOptions{
- Roots: pool,
- KeyUsages: []x509.ExtKeyUsage{
- x509.ExtKeyUsageClientAuth,
- },
- DNSName: cn,
- },
- cn: cn,
- }
-}
-
-func (c *checker) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- if !c.check(req) {
- http.Error(rw, "unauthorized", http.StatusUnauthorized)
- return
- }
- c.next.ServeHTTP(rw, req)
-}
-
-func (c *checker) check(r *http.Request) bool {
- for _, cert := range r.TLS.PeerCertificates {
- _, err := cert.Verify(c.verify)
- if err == nil {
- return cert.Subject.CommonName == c.cn
- }
- }
- return false
-}
diff --git a/pkg/dqlite/server.go b/pkg/dqlite/server.go
deleted file mode 100644
index fd7db0f4c520..000000000000
--- a/pkg/dqlite/server.go
+++ /dev/null
@@ -1,245 +0,0 @@
-package dqlite
-
-import (
- "context"
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "io/ioutil"
- "math/rand"
- "net/http"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "time"
-
- "github.com/canonical/go-dqlite"
- "github.com/canonical/go-dqlite/client"
- "github.com/pkg/errors"
- controllerclient "github.com/rancher/k3s/pkg/dqlite/controller/client"
- "github.com/rancher/k3s/pkg/dqlite/dialer"
- dqlitedriver "github.com/rancher/kine/pkg/drivers/dqlite"
- v1 "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1"
- "github.com/sirupsen/logrus"
- "k8s.io/apimachinery/pkg/util/net"
-)
-
-const (
- PeersFile = "peers.db"
- NodeIDFile = "node-id"
-)
-
-var (
- ignoreFile = map[string]bool{
- PeersFile: true,
- NodeIDFile: true,
- }
-)
-
-type Certs struct {
- ServerTrust *x509.Certificate
- ClientTrust *x509.Certificate
- ClientCert tls.Certificate
-}
-
-type DQLite struct {
- ClientCA string
- ClientCAKey string
- ClientCert string
- ClientCertKey string
- ServerCA string
- ServerCAKey string
- AdvertiseIP string
- AdvertisePort int
- DataDir string
- NodeStore client.NodeStore
- NodeInfo client.NodeInfo
- node *dqlite.Node
- StorageEndpoint string
- NodeControllerGetter NodeControllerGetter
- clientOpts []client.Option
-}
-
-type NodeControllerGetter func() v1.NodeController
-
-func New(dataDir, advertiseIP string, advertisePort int, getter NodeControllerGetter) *DQLite {
- return &DQLite{
- AdvertiseIP: advertiseIP,
- AdvertisePort: advertisePort,
- DataDir: dataDir,
- NodeControllerGetter: getter,
- }
-}
-
-func (d *DQLite) Start(ctx context.Context, initCluster, resetCluster bool, certs *Certs, next http.Handler) (http.Handler, error) {
- bindAddress := d.getBindAddress()
-
- clientTLSConfig, err := getClientTLSConfig(certs.ClientCert, certs.ServerTrust)
- if err != nil {
- return nil, err
- }
-
- advertise, err := getAdvertiseAddress(d.AdvertiseIP, d.AdvertisePort)
- if err != nil {
- return nil, errors.Wrap(err, "get advertise address")
- }
-
- dial, err := getDialer(advertise, bindAddress, clientTLSConfig)
- if err != nil {
- return nil, err
- }
-
- dqlitedriver.Dialer = dial
- dqlitedriver.Logger = log()
-
- d.clientOpts = append(d.clientOpts, client.WithDialFunc(dial), client.WithLogFunc(log()))
-
- nodeInfo, node, err := getNode(d.DataDir, advertise, bindAddress, initCluster, dial)
- if err != nil {
- return nil, err
- }
-
- d.NodeInfo = nodeInfo
- d.node = node
-
- go func() {
- <-ctx.Done()
- node.Close()
- }()
-
- if err := d.nodeStore(ctx, initCluster); err != nil {
- return nil, err
- }
-
- go d.startController(ctx)
-
- if !resetCluster {
- if err := node.Start(); err != nil {
- return nil, err
- }
- }
-
- return router(ctx, next, nodeInfo, certs.ClientTrust, "kube-apiserver", bindAddress), nil
-}
-
-func (d *DQLite) startController(ctx context.Context) {
- for {
- if nc := d.NodeControllerGetter(); nc != nil {
- if os.Getenv("NODE_NAME") == "" {
- logrus.Errorf("--disable-agent is not compatible with dqlite")
- } else {
- break
- }
- }
- time.Sleep(time.Second)
- }
-
- controllerclient.Register(ctx, os.Getenv("NODE_NAME"), d.NodeInfo, d.NodeStore, d.NodeControllerGetter(), d.clientOpts)
-}
-
-func (d *DQLite) nodeStore(ctx context.Context, initCluster bool) error {
- peerDB := filepath.Join(GetDBDir(d.DataDir), PeersFile)
- ns, err := client.DefaultNodeStore(peerDB)
- if err != nil {
- return err
- }
- d.NodeStore = ns
- d.StorageEndpoint = fmt.Sprintf("dqlite://?peer-file=%s", peerDB)
- if initCluster {
- if err := dqlitedriver.AddPeers(ctx, d.NodeStore, d.NodeInfo); err != nil {
- return err
- }
- }
- return nil
-}
-
-func getAdvertiseAddress(advertiseIP string, advertisePort int) (string, error) {
- ip := advertiseIP
- if ip == "" {
- ipAddr, err := net.ChooseHostInterface()
- if err != nil {
- return "", err
- }
- ip = ipAddr.String()
- }
-
- return fmt.Sprintf("%s:%d", ip, advertisePort), nil
-}
-
-func getClientTLSConfig(cert tls.Certificate, ca *x509.Certificate) (*tls.Config, error) {
- tlsConfig := &tls.Config{
- RootCAs: x509.NewCertPool(),
- Certificates: []tls.Certificate{
- cert,
- },
- ServerName: "kubernetes",
- }
- tlsConfig.RootCAs.AddCert(ca)
-
- return tlsConfig, nil
-}
-
-func getDialer(advertiseAddress, bindAddress string, tlsConfig *tls.Config) (client.DialFunc, error) {
- return dialer.NewHTTPDialer(advertiseAddress, bindAddress, tlsConfig)
-}
-
-func GetDBDir(dataDir string) string {
- return filepath.Join(dataDir, "db", "state.dqlite")
-}
-
-func getNode(dataDir string, advertiseAddress, bindAddress string, initCluster bool, dial client.DialFunc) (dqlite.NodeInfo, *dqlite.Node, error) {
- id, err := getClusterID(initCluster, dataDir)
- if err != nil {
- return dqlite.NodeInfo{}, nil, errors.Wrap(err, "reading cluster id")
- }
-
- dbDir := GetDBDir(dataDir)
-
- node, err := dqlite.New(id, advertiseAddress, dbDir,
- dqlite.WithBindAddress(bindAddress),
- dqlite.WithDialFunc(dial),
- dqlite.WithNetworkLatency(20*time.Millisecond))
- return dqlite.NodeInfo{
- ID: id,
- Address: advertiseAddress,
- }, node, err
-}
-
-func writeClusterID(id uint64, dataDir string) error {
- idFile := filepath.Join(GetDBDir(dataDir), NodeIDFile)
- if err := os.MkdirAll(filepath.Dir(idFile), 0700); err != nil {
- return err
- }
- return ioutil.WriteFile(idFile, []byte(strconv.FormatUint(id, 10)), 0644)
-}
-
-func deleteClusterID(dataDir string) {
- os.Remove(filepath.Join(GetDBDir(dataDir), NodeIDFile))
-}
-
-func getClusterID(initCluster bool, dataDir string) (uint64, error) {
- idFile := filepath.Join(GetDBDir(dataDir), NodeIDFile)
- content, err := ioutil.ReadFile(idFile)
- if os.IsNotExist(err) {
- content = nil
- } else if err != nil {
- return 0, err
- }
-
- idStr := strings.TrimSpace(string(content))
- if idStr == "" {
- id := uint64(rand.Intn(1 << 20))
- if initCluster {
- id = 1
- }
- return id, writeClusterID(id, dataDir)
- }
-
- return strconv.ParseUint(idStr, 10, 64)
-}
-
-func (d *DQLite) getBindAddress() string {
- // only anonymous works???
- return "@" + filepath.Join(GetDBDir(d.DataDir), "dqlite.sock")
-}
diff --git a/pkg/etcd/controller.go b/pkg/etcd/controller.go
new file mode 100644
index 000000000000..cf885f1e1f52
--- /dev/null
+++ b/pkg/etcd/controller.go
@@ -0,0 +1,89 @@
+package etcd
+
+import (
+ "context"
+ "os"
+ "time"
+
+ controllerv1 "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1"
+ "github.com/sirupsen/logrus"
+ v1 "k8s.io/api/core/v1"
+)
+
+const (
+ nodeID = "etcd.k3s.cattle.io/node-name"
+ nodeAddress = "etcd.k3s.cattle.io/node-address"
+ master = "node-role.kubernetes.io/master"
+ etcdRole = "node-role.kubernetes.io/etcd"
+)
+
+type NodeControllerGetter func() controllerv1.NodeController
+
+func Register(ctx context.Context, etcd *ETCD, nodes controllerv1.NodeController) {
+ h := &handler{
+ etcd: etcd,
+ nodeController: nodes,
+ ctx: ctx,
+ }
+ nodes.OnChange(ctx, "managed-etcd-controller", h.sync)
+ nodes.OnRemove(ctx, "managed-etcd-controller", h.onRemove)
+}
+
+type handler struct {
+ etcd *ETCD
+ nodeController controllerv1.NodeController
+ ctx context.Context
+}
+
+func (h *handler) sync(key string, node *v1.Node) (*v1.Node, error) {
+ if node == nil {
+ return nil, nil
+ }
+
+ nodeName := os.Getenv("NODE_NAME")
+ if nodeName == "" {
+ logrus.Debug("waiting for node to be assigned for etcd controller")
+ h.nodeController.EnqueueAfter(key, 5*time.Second)
+ return node, nil
+ }
+
+ if key == nodeName {
+ return h.handleSelf(node)
+ }
+
+ return node, nil
+}
+
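+// handleSelf ensures this node carries the etcd annotations and the etcd/master role labels, updating the Node object only when something has drifted.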
+func (h *handler) handleSelf(node *v1.Node) (*v1.Node, error) {
+ if node.Annotations[nodeID] == h.etcd.name &&
+ node.Annotations[nodeAddress] == h.etcd.address &&
+ node.Labels[etcdRole] == "true" &&
+ node.Labels[master] == "true" {
+ return node, nil
+ }
+
+ node = node.DeepCopy()
+ if node.Annotations == nil {
+ node.Annotations = map[string]string{}
+ }
+ node.Annotations[nodeID] = h.etcd.name
+ node.Annotations[nodeAddress] = h.etcd.address
+ node.Labels[etcdRole] = "true"
+ node.Labels[master] = "true"
+
+ return h.nodeController.Update(node)
+}
+
+func (h *handler) onRemove(key string, node *v1.Node) (*v1.Node, error) {
+ if _, ok := node.Labels[etcdRole]; !ok {
+ return node, nil
+ }
+
+ id := node.Annotations[nodeID]
+ address := node.Annotations[nodeAddress]
+ if address == "" {
+ return node, nil
+ }
+
+ return node, h.etcd.removePeer(h.ctx, id, address)
+}
diff --git a/pkg/etcd/etcd.go b/pkg/etcd/etcd.go
new file mode 100644
index 000000000000..aa41e9e45979
--- /dev/null
+++ b/pkg/etcd/etcd.go
@@ -0,0 +1,421 @@
+package etcd
+
+import (
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/gorilla/mux"
+ "github.com/pkg/errors"
+ certutil "github.com/rancher/dynamiclistener/cert"
+ "github.com/rancher/k3s/pkg/clientaccess"
+ "github.com/rancher/k3s/pkg/daemons/config"
+ "github.com/rancher/k3s/pkg/daemons/executor"
+ "github.com/sirupsen/logrus"
+ etcd "go.etcd.io/etcd/clientv3"
+ "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ utilnet "k8s.io/apimachinery/pkg/util/net"
+)
+
+type ETCD struct {
+ client *etcd.Client
+ config *config.Control
+ name string
+ runtime *config.ControlRuntime
+ address string
+}
+
+type Members struct {
+ Members []*etcdserverpb.Member `json:"members"`
+}
+
+func (e *ETCD) EndpointName() string {
+ return "etcd"
+}
+
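+// Test verifies that this server appears in the etcd member list under its expected name and peer URL.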
+func (e *ETCD) Test(ctx context.Context) error {
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel()
+ members, err := e.client.MemberList(ctx)
+ if err != nil {
+ return err
+ }
+
+ var cluster []string
+ for _, member := range members.Members {
+ for _, peerURL := range member.PeerURLs {
+ if peerURL == e.peerURL() && e.name == member.Name {
+ return nil
+ }
+ }
+ if len(member.PeerURLs) > 0 {
+ cluster = append(cluster, fmt.Sprintf("%s=%s", member.Name, member.PeerURLs[0]))
+ }
+ }
+ msg := fmt.Sprintf("This server is a not a member of the etcd cluster "+"found %v and expecting to contain %s=%s", cluster, e.name, e.address)
+ logrus.Error(msg)
+ return fmt.Errorf(msg)
+}
+
+func walDir(config *config.Control) string {
+ return filepath.Join(dataDir(config), "member", "wal")
+}
+
+func dataDir(config *config.Control) string {
+ return filepath.Join(config.DataDir, "db", "etcd")
+}
+
+func nameFile(config *config.Control) string {
+ return filepath.Join(dataDir(config), "name")
+}
+
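+// IsInitialized reports whether a member WAL directory already exists under the etcd data dir.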
+func (e *ETCD) IsInitialized(ctx context.Context, config *config.Control) (bool, error) {
+ if s, err := os.Stat(walDir(config)); err == nil && s.IsDir() {
+ return true, nil
+ } else if os.IsNotExist(err) {
+ return false, nil
+ } else {
+ return false, errors.Wrap(err, "failed to check if etcd is initialized")
+ }
+}
+
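+// Reset forces a new single-member cluster from the existing data, then polls until only this member remains and prompts the operator to restart.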
+func (e *ETCD) Reset(ctx context.Context) error {
+ go func() {
+ for {
+ time.Sleep(5 * time.Second)
+ if err := e.Test(ctx); err == nil {
+ members, err := e.client.MemberList(ctx)
+ if err != nil {
+ continue
+ }
+
+ if len(members.Members) == 1 && members.Members[0].Name == e.name {
+ logrus.Infof("etcd is running, restart without --cluster-reset flag now. Backup and delete ${datadir}/server/db on each peer etcd server and rejoin the nodes")
+ os.Exit(0)
+ }
+ }
+ }
+ }()
+ return e.newCluster(ctx, true)
+}
+
+func (e *ETCD) Start(ctx context.Context, clientAccess *clientaccess.Info) error {
+ existingCluster, err := e.IsInitialized(ctx, e.config)
+ if err != nil {
+ return errors.Wrapf(err, "failed to validation")
+ }
+
+ e.config.Runtime.ClusterControllerStart = func(ctx context.Context) error {
+ Register(ctx, e, e.config.Runtime.Core.Core().V1().Node())
+ return nil
+ }
+
+ if existingCluster {
+ opt, err := executor.CurrentETCDOptions()
+ if err != nil {
+ return err
+ }
+ return e.cluster(ctx, false, opt)
+ }
+
+ if clientAccess == nil {
+ return e.newCluster(ctx, false)
+ }
+ err = e.join(ctx, clientAccess)
+ return errors.Wrap(err, "joining etcd cluster")
+}
+
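+// join fetches the current member list from an existing server via /db/info, registers this node as a new member if it is not already present, and starts etcd with initial-cluster-state=existing.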
+func (e *ETCD) join(ctx context.Context, clientAccessInfo *clientaccess.Info) error {
+ resp, err := clientaccess.Get("/db/info", clientAccessInfo)
+ if err != nil {
+ return err
+ }
+
+ var memberList Members
+ if err := json.Unmarshal(resp, &memberList); err != nil {
+ return err
+ }
+
+ var clientURLs []string
+ for _, member := range memberList.Members {
+ clientURLs = append(clientURLs, member.ClientURLs...)
+ }
+
+ client, err := joinClient(ctx, e.runtime, clientURLs)
+ if err != nil {
+ return err
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
+ defer cancel()
+
+ var (
+ cluster []string
+ add = true
+ )
+
+ members, err := client.MemberList(ctx)
+ if err != nil {
+ logrus.Errorf("failed to get member list from cluster, will assume this member is already added")
+ members = &etcd.MemberListResponse{
+ Members: append(memberList.Members, &etcdserverpb.Member{
+ Name: e.name,
+ PeerURLs: []string{e.peerURL()},
+ }),
+ }
+ add = false
+ }
+
+ for _, member := range members.Members {
+ for _, peer := range member.PeerURLs {
+ u, err := url.Parse(peer)
+ if err != nil {
+ return err
+ }
+ // An uninitialized member won't have a name
+ if u.Hostname() == e.address && (member.Name == e.name || member.Name == "") {
+ add = false
+ }
+ if member.Name == "" && u.Hostname() == e.address {
+ member.Name = e.name
+ }
+ if len(member.PeerURLs) > 0 {
+ cluster = append(cluster, fmt.Sprintf("%s=%s", member.Name, member.PeerURLs[0]))
+ }
+ }
+ }
+
+ if add {
+ logrus.Infof("Adding %s to etcd cluster %v", e.peerURL(), cluster)
+ if _, err = client.MemberAdd(ctx, []string{e.peerURL()}); err != nil {
+ return err
+ }
+ cluster = append(cluster, fmt.Sprintf("%s=%s", e.name, e.peerURL()))
+ }
+
+ logrus.Infof("Starting etcd for cluster %v", cluster)
+ return e.cluster(ctx, false, executor.InitialOptions{
+ Cluster: strings.Join(cluster, ","),
+ State: "existing",
+ })
+}
+
+func (e *ETCD) Register(ctx context.Context, config *config.Control, l net.Listener, handler http.Handler) (net.Listener, http.Handler, error) {
+ e.config = config
+ e.runtime = config.Runtime
+
+ client, err := newClient(ctx, e.runtime)
+ if err != nil {
+ return nil, nil, err
+ }
+ e.client = client
+
+ address, err := getAdvertiseAddress(config.AdvertiseIP)
+ if err != nil {
+ return nil, nil, err
+ }
+ e.address = address
+
+ e.config.Datastore.Endpoint = "https://127.0.0.1:2379"
+ e.config.Datastore.Config.CAFile = e.runtime.ETCDServerCA
+ e.config.Datastore.Config.CertFile = e.runtime.ClientETCDCert
+ e.config.Datastore.Config.KeyFile = e.runtime.ClientETCDKey
+
+ if err := e.setName(); err != nil {
+ return nil, nil, err
+ }
+
+ return l, e.handler(handler), err
+}
+
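+// setName loads the persisted etcd member name, generating hostname plus a short random suffix on first start so names stay unique across rejoins.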
+func (e *ETCD) setName() error {
+ fileName := nameFile(e.config)
+ data, err := ioutil.ReadFile(fileName)
+ if os.IsNotExist(err) {
+ h, err := os.Hostname()
+ if err != nil {
+ return err
+ }
+ e.name = strings.SplitN(h, ".", 2)[0] + "-" + uuid.New().String()[:8]
+ if err := os.MkdirAll(filepath.Dir(fileName), 0755); err != nil {
+ return err
+ }
+ return ioutil.WriteFile(fileName, []byte(e.name), 0644)
+ } else if err != nil {
+ return err
+ }
+ e.name = string(data)
+ return nil
+}
+
+func (e *ETCD) handler(next http.Handler) http.Handler {
+ mux := mux.NewRouter()
+ mux.Handle("/db/info", e.infoHandler())
+ mux.NotFoundHandler = next
+ return mux
+}
+
+func (e *ETCD) infoHandler() http.Handler {
+ return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+ ctx, cancel := context.WithTimeout(req.Context(), 2*time.Second)
+ defer cancel()
+
+ rw.Header().Set("Content-Type", "application/json")
+
+ members, err := e.client.MemberList(ctx)
+ if err != nil {
+ // Fall back to advertising only this member so joining nodes can still bootstrap.
+ json.NewEncoder(rw).Encode(&Members{
+ Members: []*etcdserverpb.Member{
+ {
+ Name: e.name,
+ PeerURLs: []string{e.peerURL()},
+ ClientURLs: []string{e.clientURL()},
+ },
+ },
+ })
+ return
+ }
+
+ json.NewEncoder(rw).Encode(&Members{
+ Members: members.Members,
+ })
+ })
+}
+
+func joinClient(ctx context.Context, runtime *config.ControlRuntime, peers []string) (*etcd.Client, error) {
+ tlsConfig, err := toTLSConfig(runtime)
+ if err != nil {
+ return nil, err
+ }
+
+ cfg := etcd.Config{
+ Endpoints: peers,
+ TLS: tlsConfig,
+ Context: ctx,
+ }
+
+ return etcd.New(cfg)
+}
+
+func newClient(ctx context.Context, runtime *config.ControlRuntime) (*etcd.Client, error) {
+ tlsConfig, err := toTLSConfig(runtime)
+ if err != nil {
+ return nil, err
+ }
+
+ cfg := etcd.Config{
+ Context: ctx,
+ Endpoints: []string{"https://127.0.0.1:2379"},
+ TLS: tlsConfig,
+ }
+
+ return etcd.New(cfg)
+}
+
+func toTLSConfig(runtime *config.ControlRuntime) (*tls.Config, error) {
+ clientCert, err := tls.LoadX509KeyPair(runtime.ClientETCDCert, runtime.ClientETCDKey)
+ if err != nil {
+ return nil, err
+ }
+
+ pool, err := certutil.NewPool(runtime.ETCDServerCA)
+ if err != nil {
+ return nil, err
+ }
+
+ return &tls.Config{
+ RootCAs: pool,
+ Certificates: []tls.Certificate{clientCert},
+ }, nil
+}
+
+func getAdvertiseAddress(advertiseIP string) (string, error) {
+ ip := advertiseIP
+ if ip == "" {
+ ipAddr, err := utilnet.ChooseHostInterface()
+ if err != nil {
+ return "", err
+ }
+ ip = ipAddr.String()
+ }
+
+ return ip, nil
+}
+
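+// newCluster bootstraps a brand-new single-member cluster; reset additionally forces etcd to discard stale membership state.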
+func (e *ETCD) newCluster(ctx context.Context, reset bool) error {
+ return e.cluster(ctx, reset, executor.InitialOptions{
+ AdvertisePeerURL: e.peerURL(),
+ Cluster: fmt.Sprintf("%s=%s", e.name, e.peerURL()),
+ State: "new",
+ })
+}
+
+func (e *ETCD) peerURL() string {
+ return fmt.Sprintf("https://%s:2380", e.address)
+}
+
+func (e *ETCD) clientURL() string {
+ return fmt.Sprintf("https://%s:2379", e.address)
+}
+
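+// cluster assembles the etcd config with the conventional ports (2379 client, 2380 peer, 2381 metrics) and mutual TLS material, then hands it to the executor.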
+func (e *ETCD) cluster(ctx context.Context, forceNew bool, options executor.InitialOptions) error {
+ return executor.ETCD(executor.ETCDConfig{
+ Name: e.name,
+ InitialOptions: options,
+ ForceNewCluster: forceNew,
+ ListenClientURLs: e.clientURL() + ",https://127.0.0.1:2379",
+ ListenMetricsURLs: "http://127.0.0.1:2381",
+ ListenPeerURLs: e.peerURL(),
+ AdvertiseClientURLs: e.clientURL(),
+ DataDir: dataDir(e.config),
+ ServerTrust: executor.ServerTrust{
+ CertFile: e.config.Runtime.ServerETCDCert,
+ KeyFile: e.config.Runtime.ServerETCDKey,
+ ClientCertAuth: true,
+ TrustedCAFile: e.config.Runtime.ETCDServerCA,
+ },
+ PeerTrust: executor.PeerTrust{
+ CertFile: e.config.Runtime.PeerServerClientETCDCert,
+ KeyFile: e.config.Runtime.PeerServerClientETCDKey,
+ ClientCertAuth: true,
+ TrustedCAFile: e.config.Runtime.ETCDPeerCA,
+ },
+ ElectionTimeout: 5000,
+ HeartbeatInterval: 500,
+ })
+}
+
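+// removePeer drops the cluster member whose name matches id and whose advertised peer host matches address.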
+func (e *ETCD) removePeer(ctx context.Context, id, address string) error {
+ members, err := e.client.MemberList(ctx)
+ if err != nil {
+ return err
+ }
+
+ for _, member := range members.Members {
+ if member.Name != id {
+ continue
+ }
+ for _, peerURL := range member.PeerURLs {
+ u, err := url.Parse(peerURL)
+ if err != nil {
+ return err
+ }
+ if u.Hostname() == address {
+ logrus.Infof("Removing name=%s id=%d address=%s from etcd", member.Name, member.ID, address)
+ _, err := e.client.MemberRemove(ctx, member.ID)
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/pkg/nodeconfig/nodeconfig.go b/pkg/nodeconfig/nodeconfig.go
index 77946f3d448b..10fe16b1f63d 100644
--- a/pkg/nodeconfig/nodeconfig.go
+++ b/pkg/nodeconfig/nodeconfig.go
@@ -9,14 +9,18 @@ import (
"strings"
"github.com/pkg/errors"
+ "github.com/rancher/k3s/pkg/version"
corev1 "k8s.io/api/core/v1"
)
+var (
+ NodeArgsAnnotation = version.Program + ".io/node-args"
+ NodeEnvAnnotation = version.Program + ".io/node-env"
+ NodeConfigHashAnnotation = version.Program + ".io/node-config-hash"
+)
+
const (
- NodeArgsAnnotation = "k3s.io/node-args"
- NodeEnvAnnotation = "k3s.io/node-env"
- NodeConfigHashAnnotation = "k3s.io/node-config-hash"
- OmittedValue = "********"
+ OmittedValue = "********"
)
func getNodeArgs() (string, error) {
@@ -47,7 +51,7 @@ func getNodeEnv() (string, error) {
k3sEnv := make(map[string]string)
for _, v := range os.Environ() {
keyValue := strings.SplitN(v, "=", 2)
- if strings.HasPrefix(keyValue[0], "K3S_") {
+ if strings.HasPrefix(keyValue[0], version.ProgramUpper+"_") {
k3sEnv[keyValue[0]] = keyValue[1]
}
}
@@ -93,10 +97,10 @@ func SetNodeConfigAnnotations(node *corev1.Node) (bool, error) {
func isSecret(key string) bool {
secretData := []string{
- "K3S_TOKEN",
- "K3S_DATASTORE_ENDPOINT",
- "K3S_AGENT_TOKEN",
- "K3S_CLUSTER_SECRET",
+ version.ProgramUpper + "_TOKEN",
+ version.ProgramUpper + "_DATASTORE_ENDPOINT",
+ version.ProgramUpper + "_AGENT_TOKEN",
+ version.ProgramUpper + "_CLUSTER_SECRET",
"--token",
"-t",
"--agent-token",
diff --git a/pkg/nodeconfig/nodeconfig_test.go b/pkg/nodeconfig/nodeconfig_test.go
index 313b66f38a99..823ee08cf7ba 100644
--- a/pkg/nodeconfig/nodeconfig_test.go
+++ b/pkg/nodeconfig/nodeconfig_test.go
@@ -4,6 +4,7 @@ import (
"os"
"testing"
+ "github.com/rancher/k3s/pkg/version"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -27,7 +28,7 @@ var FakeNodeWithAnnotation = &corev1.Node{
Name: "fakeNode-with-annotation",
Annotations: map[string]string{
NodeArgsAnnotation: `["server","--no-flannel"]`,
- NodeEnvAnnotation: `{"K3S_NODE_NAME":"fakeNode-with-annotation"}`,
+ NodeEnvAnnotation: `{"` + version.ProgramUpper + `_NODE_NAME":"fakeNode-with-annotation"}`,
NodeConfigHashAnnotation: "LNQOAOIMOQIBRMEMACW7LYHXUNPZADF6RFGOSPIHJCOS47UVUJAA====",
},
},
@@ -40,8 +41,8 @@ func assertEqual(t *testing.T, a interface{}, b interface{}) {
}
func TestSetEmptyNodeConfigAnnotations(t *testing.T) {
- os.Args = []string{"k3s", "server", "--no-flannel"}
- os.Setenv("K3S_NODE_NAME", "fakeNode-no-annotation")
+ os.Args = []string{version.Program, "server", "--no-flannel"}
+ os.Setenv(version.ProgramUpper+"_NODE_NAME", "fakeNode-no-annotation")
nodeUpdated, err := SetNodeConfigAnnotations(FakeNodeWithNoAnnotation)
if err != nil {
t.Fatalf("Failed to set node config annotation: %v", err)
@@ -52,7 +53,7 @@ func TestSetEmptyNodeConfigAnnotations(t *testing.T) {
actualArgs := FakeNodeWithNoAnnotation.Annotations[NodeArgsAnnotation]
assertEqual(t, expectedArgs, actualArgs)
- expectedEnv := `{"K3S_NODE_NAME":"fakeNode-no-annotation"}`
+ expectedEnv := `{"` + version.ProgramUpper + `_NODE_NAME":"fakeNode-no-annotation"}`
actualEnv := FakeNodeWithNoAnnotation.Annotations[NodeEnvAnnotation]
assertEqual(t, expectedEnv, actualEnv)
@@ -63,8 +64,8 @@ func TestSetEmptyNodeConfigAnnotations(t *testing.T) {
func TestSetExistingNodeConfigAnnotations(t *testing.T) {
// adding same config
- os.Args = []string{"k3s", "server", "--no-flannel"}
- os.Setenv("K3S_NODE_NAME", "fakeNode-with-annotation")
+ os.Args = []string{version.Program, "server", "--no-flannel"}
+ os.Setenv(version.ProgramUpper+"_NODE_NAME", "fakeNode-with-annotation")
nodeUpdated, err := SetNodeConfigAnnotations(FakeNodeWithAnnotation)
if err != nil {
t.Fatalf("Failed to set node config annotation: %v", err)
@@ -73,7 +74,7 @@ func TestSetExistingNodeConfigAnnotations(t *testing.T) {
}
func TestSetArgsWithEqual(t *testing.T) {
- os.Args = []string{"k3s", "server", "--no-flannel", "--write-kubeconfig-mode=777"}
+ os.Args = []string{version.Program, "server", "--no-flannel", "--write-kubeconfig-mode=777"}
os.Setenv("K3S_NODE_NAME", "fakeNode-with-no-annotation")
nodeUpdated, err := SetNodeConfigAnnotations(FakeNodeWithNoAnnotation)
if err != nil {
diff --git a/pkg/server/router.go b/pkg/server/router.go
index a3b9f23c5bb3..316d6d45df2f 100644
--- a/pkg/server/router.go
+++ b/pkg/server/router.go
@@ -17,6 +17,7 @@ import (
"github.com/rancher/k3s/pkg/bootstrap"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/passwd"
+ "github.com/rancher/k3s/pkg/version"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/json"
)
@@ -26,28 +27,29 @@ const (
)
func router(serverConfig *config.Control, tunnel http.Handler, ca []byte) http.Handler {
+ prefix := "/v1-" + version.Program
authed := mux.NewRouter()
- authed.Use(authMiddleware(serverConfig, "k3s:agent"))
+ authed.Use(authMiddleware(serverConfig, version.Program+":agent"))
authed.NotFoundHandler = serverConfig.Runtime.Handler
- authed.Path("/v1-k3s/serving-kubelet.crt").Handler(servingKubeletCert(serverConfig, serverConfig.Runtime.ServingKubeletKey))
- authed.Path("/v1-k3s/client-kubelet.crt").Handler(clientKubeletCert(serverConfig, serverConfig.Runtime.ClientKubeletKey))
- authed.Path("/v1-k3s/client-kube-proxy.crt").Handler(fileHandler(serverConfig.Runtime.ClientKubeProxyCert, serverConfig.Runtime.ClientKubeProxyKey))
- authed.Path("/v1-k3s/client-k3s-controller.crt").Handler(fileHandler(serverConfig.Runtime.ClientK3sControllerCert, serverConfig.Runtime.ClientK3sControllerKey))
- authed.Path("/v1-k3s/client-ca.crt").Handler(fileHandler(serverConfig.Runtime.ClientCA))
- authed.Path("/v1-k3s/server-ca.crt").Handler(fileHandler(serverConfig.Runtime.ServerCA))
- authed.Path("/v1-k3s/config").Handler(configHandler(serverConfig))
+ authed.Path(prefix + "/serving-kubelet.crt").Handler(servingKubeletCert(serverConfig, serverConfig.Runtime.ServingKubeletKey))
+ authed.Path(prefix + "/client-kubelet.crt").Handler(clientKubeletCert(serverConfig, serverConfig.Runtime.ClientKubeletKey))
+ authed.Path(prefix + "/client-kube-proxy.crt").Handler(fileHandler(serverConfig.Runtime.ClientKubeProxyCert, serverConfig.Runtime.ClientKubeProxyKey))
+ authed.Path(prefix + "/client-" + version.Program + "-controller.crt").Handler(fileHandler(serverConfig.Runtime.ClientK3sControllerCert, serverConfig.Runtime.ClientK3sControllerKey))
+ authed.Path(prefix + "/client-ca.crt").Handler(fileHandler(serverConfig.Runtime.ClientCA))
+ authed.Path(prefix + "/server-ca.crt").Handler(fileHandler(serverConfig.Runtime.ServerCA))
+ authed.Path(prefix + "/config").Handler(configHandler(serverConfig))
nodeAuthed := mux.NewRouter()
nodeAuthed.Use(authMiddleware(serverConfig, "system:nodes"))
- nodeAuthed.Path("/v1-k3s/connect").Handler(tunnel)
+ nodeAuthed.Path(prefix + "/connect").Handler(tunnel)
nodeAuthed.NotFoundHandler = authed
serverAuthed := mux.NewRouter()
- serverAuthed.Use(authMiddleware(serverConfig, "k3s:server"))
+ serverAuthed.Use(authMiddleware(serverConfig, version.Program+":server"))
serverAuthed.NotFoundHandler = nodeAuthed
serverAuthed.Path("/db/info").Handler(nodeAuthed)
if serverConfig.Runtime.HTTPBootstrap {
- serverAuthed.Path("/v1-k3s/server-bootstrap").Handler(bootstrap.Handler(&serverConfig.Runtime.ControlRuntimeBootstrap))
+ serverAuthed.Path(prefix + "/server-bootstrap").Handler(bootstrap.Handler(&serverConfig.Runtime.ControlRuntimeBootstrap))
}
staticDir := filepath.Join(serverConfig.DataDir, "static")
@@ -68,17 +70,17 @@ func cacerts(ca []byte) http.Handler {
}
func getNodeInfo(req *http.Request) (string, string, error) {
- nodeNames := req.Header["K3s-Node-Name"]
- if len(nodeNames) != 1 || nodeNames[0] == "" {
+ nodeName := req.Header.Get(version.Program + "-Node-Name")
+ if nodeName == "" {
return "", "", errors.New("node name not set")
}
- nodePasswords := req.Header["K3s-Node-Password"]
- if len(nodePasswords) != 1 || nodePasswords[0] == "" {
+ nodePassword := req.Header.Get(version.Program + "-Node-Password")
+ if nodePassword == "" {
return "", "", errors.New("node password not set")
}
- return strings.ToLower(nodeNames[0]), nodePasswords[0], nil
+ return strings.ToLower(nodeName), nodePassword, nil
}
func getCACertAndKeys(caCertFile, caKeyFile, signingKeyFile string) ([]*x509.Certificate, crypto.Signer, crypto.Signer, error) {
diff --git a/pkg/server/server.go b/pkg/server/server.go
index db5e3d7580e1..1da457ce2dad 100644
--- a/pkg/server/server.go
+++ b/pkg/server/server.go
@@ -25,6 +25,7 @@ import (
"github.com/rancher/k3s/pkg/servicelb"
"github.com/rancher/k3s/pkg/static"
"github.com/rancher/k3s/pkg/util"
+ "github.com/rancher/k3s/pkg/version"
v1 "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/leader"
"github.com/rancher/wrangler/pkg/resolvehome"
@@ -114,12 +115,17 @@ func runControllers(ctx context.Context, config *Config) error {
return err
}
+ controlConfig.Runtime.Core = sc.Core
+ if config.ControlConfig.Runtime.ClusterControllerStart != nil {
+ if err := config.ControlConfig.Runtime.ClusterControllerStart(ctx); err != nil {
+ return errors.Wrapf(err, "starting cluster controllers")
+ }
+ }
+
if err := sc.Start(ctx); err != nil {
return err
}
- controlConfig.Runtime.Core = sc.Core
-
start := func(ctx context.Context) {
if err := masterControllers(ctx, sc, config); err != nil {
panic(err)
@@ -138,7 +144,7 @@ func runControllers(ctx context.Context, config *Config) error {
logrus.Fatal("controllers exited")
}()
} else {
- go leader.RunOrDie(ctx, "", "k3s", sc.K8s, start)
+ go leader.RunOrDie(ctx, "", version.Program, sc.K8s, start)
}
return nil
@@ -256,7 +262,7 @@ func writeKubeConfig(certs string, config *Config) error {
kubeConfig, err := HomeKubeConfig(true, config.Rootless)
def := true
if err != nil {
- kubeConfig = filepath.Join(config.ControlConfig.DataDir, "kubeconfig-k3s.yaml")
+ kubeConfig = filepath.Join(config.ControlConfig.DataDir, "kubeconfig-"+version.Program+".yaml")
def = false
}
kubeConfigSymlink := kubeConfig
@@ -333,7 +339,7 @@ func printToken(httpsPort int, advertiseIP, prefix, cmd string) {
ip = hostIP.String()
}
- logrus.Infof("%s k3s %s -s https://%s:%d -t ${NODE_TOKEN}", prefix, cmd, ip, httpsPort)
+ logrus.Infof("%s %s %s -s https://%s:%d -t ${NODE_TOKEN}", prefix, version.Program, cmd, ip, httpsPort)
}
func FormatToken(token string, certFile string) (string, error) {
diff --git a/pkg/servicelb/controller.go b/pkg/servicelb/controller.go
index b09dcc8a0f83..0cfb0c256eaf 100644
--- a/pkg/servicelb/controller.go
+++ b/pkg/servicelb/controller.go
@@ -6,6 +6,7 @@ import (
"sort"
"strconv"
+ "github.com/rancher/k3s/pkg/version"
appclient "github.com/rancher/wrangler-api/pkg/generated/controllers/apps/v1"
coreclient "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/apply"
@@ -26,12 +27,15 @@ import (
coregetter "k8s.io/client-go/kubernetes/typed/core/v1"
)
+var (
+ svcNameLabel = "svccontroller." + version.Program + ".cattle.io/svcname"
+ daemonsetNodeLabel = "svccontroller." + version.Program + ".cattle.io/enablelb"
+ nodeSelectorLabel = "svccontroller." + version.Program + ".cattle.io/nodeselector"
+)
+
const (
- image = "rancher/klipper-lb:v0.1.2"
- svcNameLabel = "svccontroller.k3s.cattle.io/svcname"
- daemonsetNodeLabel = "svccontroller.k3s.cattle.io/enablelb"
- nodeSelectorLabel = "svccontroller.k3s.cattle.io/nodeselector"
- Ready = condition.Cond("Ready")
+ image = "rancher/klipper-lb:v0.1.2"
+ Ready = condition.Cond("Ready")
)
var (
diff --git a/pkg/version/version.go b/pkg/version/version.go
index a645beef9eb7..8603afce179f 100644
--- a/pkg/version/version.go
+++ b/pkg/version/version.go
@@ -1,6 +1,10 @@
package version
+import "strings"
+
var (
- Version = "dev"
- GitCommit = "HEAD"
+ Program = "k3s"
+ ProgramUpper = strings.ToUpper("k3s")
+ Version = "dev"
+ GitCommit = "HEAD"
)
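Editor's note: because Program is a plain package-level string initialized from a literal, a downstream build could presumably rebrand the binary by overriding it at link time; the flag below is illustrative, not something this diff adds. Note that as written, ProgramUpper is computed from the literal "k3s" rather than from Program, so it would stay "K3S" even if Program were overridden:

```go
// Hypothetical rebranding build (not part of this diff):
//
//   go build -ldflags "-X github.com/rancher/k3s/pkg/version.Program=myk3s" ./cmd/k3s
package main

import (
	"fmt"

	"github.com/rancher/k3s/pkg/version"
)

func main() {
	// With a stock build this prints "k3s K3S".
	fmt.Println(version.Program, version.ProgramUpper)
}
```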
diff --git a/scripts/build b/scripts/build
index 4ca6333c4a84..66416ca62736 100755
--- a/scripts/build
+++ b/scripts/build
@@ -39,15 +39,10 @@ STATIC="
-extldflags '-static'
"
-if [ "$DQLITE" = "true" ]; then
- DQLITE_TAGS="dqlite"
- DQLITE_STATIC_SQLITE="-luv -lraft -lco"
-fi
-
STATIC_SQLITE="
- -extldflags '-static -lm -ldl -lz -lpthread $DQLITE_STATIC_SQLITE'
+ -extldflags '-static -lm -ldl -lz -lpthread'
"
-TAGS="ctrd apparmor seccomp no_btrfs netcgo osusergo providerless $DQLITE_TAGS"
+TAGS="ctrd apparmor seccomp no_btrfs netcgo osusergo providerless"
RUNC_TAGS="apparmor seccomp"
RUNC_STATIC="static"
diff --git a/scripts/package-cli b/scripts/package-cli
index ee559101873e..d73fc1ec3f01 100755
--- a/scripts/package-cli
+++ b/scripts/package-cli
@@ -56,9 +56,6 @@ LDFLAGS="
-w -s
"
STATIC="-extldflags '-static'"
-if [ "$DQLITE" = 'true' ]; then
- DQLITE_TAGS='dqlite'
-fi
-CGO_ENABLED=0 "${GO}" build -tags "$DQLITE_TAGS" -ldflags "$LDFLAGS $STATIC" -o ${CMD_NAME} ./cmd/k3s/main.go
+CGO_ENABLED=0 "${GO}" build -ldflags "$LDFLAGS $STATIC" -o ${CMD_NAME} ./cmd/k3s/main.go
./scripts/build-upload ${CMD_NAME} ${COMMIT}
diff --git a/scripts/provision/generic/alpine310/vagrant b/scripts/provision/generic/alpine310/vagrant
index ecd6898989d2..aec65b1749f4 100755
--- a/scripts/provision/generic/alpine310/vagrant
+++ b/scripts/provision/generic/alpine310/vagrant
@@ -3,11 +3,9 @@ set -ve
apk add -q -f curl libc6-compat tzdata
download_go
-download_dqlite
# ---
cat <<EOF >>/etc/profile.d/build.sh
export SELINUX=true
-export DQLITE=true
export STATIC_BUILD=true
EOF
. /etc/profile.d/build.sh
@@ -28,4 +26,4 @@ EOF
else
echo "Using host docker server v$(cat /tmp/docker-server-version)"
fi
-)
\ No newline at end of file
+)
diff --git a/scripts/provision/generic/centos7/vagrant b/scripts/provision/generic/centos7/vagrant
index 448d59edd4fe..bf88a92b629a 100755
--- a/scripts/provision/generic/centos7/vagrant
+++ b/scripts/provision/generic/centos7/vagrant
@@ -5,7 +5,6 @@ download_go
# ---
cat <<EOF >>/etc/profile.d/build.sh
export SELINUX=true
-# export DQLITE=true
# export STATIC_BUILD=true
EOF
. /etc/profile.d/build.sh
diff --git a/scripts/provision/generic/ubuntu1804/vagrant b/scripts/provision/generic/ubuntu1804/vagrant
index 4cb6b0c771a7..fd784a05ccb4 100755
--- a/scripts/provision/generic/ubuntu1804/vagrant
+++ b/scripts/provision/generic/ubuntu1804/vagrant
@@ -5,7 +5,6 @@ download_go
# ---
cat <<EOF >>/etc/profile.d/build.sh
export SELINUX=true
-# export DQLITE=true
# export STATIC_BUILD=true
EOF
. /etc/profile.d/build.sh
diff --git a/scripts/provision/vagrant b/scripts/provision/vagrant
index 7bb0e1ec0f48..70adbe3a6154 100755
--- a/scripts/provision/vagrant
+++ b/scripts/provision/vagrant
@@ -79,18 +79,6 @@ download_go() {
curl -sL https://storage.googleapis.com/golang/go${goversion}.linux-${ARCH}.tar.gz | tar -xzf - -C /usr/local
}
-# --- Utility function to download dqlite
-download_dqlite() {
- dqliteURL="https://github.com/$(grep dqlite-build Dockerfile.dapper | sed -e 's/^.*--from=\([^ ]*\).*$/\1/' -e 's|:|/releases/download/|')/dqlite-$ARCH.tgz"
- if [ -z "$dqliteURL" ]; then
- echo 'Cannot find dqlite URL to fetch'
- return 1
- fi
- mkdir -p /usr/src/
- echo "Downloading DQLITE from $dqliteURL"
- curl -sL $dqliteURL -o /usr/src/dqlite.tgz
-}
-
# --- Run vagrant provision script if available
if [ ! -f "${PROVISION}" ]; then
echo "WARNING: Unable to execute provision script \"${PROVISION}\""
diff --git a/scripts/test b/scripts/test
index 16335434c58c..795eefc2d7c3 100755
--- a/scripts/test
+++ b/scripts/test
@@ -27,4 +27,3 @@ E2E_OUTPUT=$artifacts test-run-sonobuoy
test-run-sonobuoy mysql
test-run-sonobuoy postgres
-# test-run-sonobuoy dqlite
\ No newline at end of file
diff --git a/vendor/github.com/lxc/lxd/COPYING b/vendor/github.com/coreos/go-semver/LICENSE
similarity index 100%
rename from vendor/github.com/lxc/lxd/COPYING
rename to vendor/github.com/coreos/go-semver/LICENSE
diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE
new file mode 100644
index 000000000000..23a0ada2fbb5
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2018 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go
new file mode 100644
index 000000000000..76cf4852c769
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/semver/semver.go
@@ -0,0 +1,296 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Semantic Versions http://semver.org
+package semver
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type Version struct {
+ Major int64
+ Minor int64
+ Patch int64
+ PreRelease PreRelease
+ Metadata string
+}
+
+type PreRelease string
+
+func splitOff(input *string, delim string) (val string) {
+ parts := strings.SplitN(*input, delim, 2)
+
+ if len(parts) == 2 {
+ *input = parts[0]
+ val = parts[1]
+ }
+
+ return val
+}
+
+func New(version string) *Version {
+ return Must(NewVersion(version))
+}
+
+func NewVersion(version string) (*Version, error) {
+ v := Version{}
+
+ if err := v.Set(version); err != nil {
+ return nil, err
+ }
+
+ return &v, nil
+}
+
+// Must is a helper for wrapping NewVersion and will panic if err is not nil.
+func Must(v *Version, err error) *Version {
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Set parses and updates v from the given version string. Implements flag.Value
+func (v *Version) Set(version string) error {
+ metadata := splitOff(&version, "+")
+ preRelease := PreRelease(splitOff(&version, "-"))
+ dotParts := strings.SplitN(version, ".", 3)
+
+ if len(dotParts) != 3 {
+ return fmt.Errorf("%s is not in dotted-tri format", version)
+ }
+
+ if err := validateIdentifier(string(preRelease)); err != nil {
+ return fmt.Errorf("failed to validate pre-release: %v", err)
+ }
+
+ if err := validateIdentifier(metadata); err != nil {
+ return fmt.Errorf("failed to validate metadata: %v", err)
+ }
+
+ parsed := make([]int64, 3, 3)
+
+ for i, v := range dotParts[:3] {
+ val, err := strconv.ParseInt(v, 10, 64)
+ parsed[i] = val
+ if err != nil {
+ return err
+ }
+ }
+
+ v.Metadata = metadata
+ v.PreRelease = preRelease
+ v.Major = parsed[0]
+ v.Minor = parsed[1]
+ v.Patch = parsed[2]
+ return nil
+}
+
+func (v Version) String() string {
+ var buffer bytes.Buffer
+
+ fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
+
+ if v.PreRelease != "" {
+ fmt.Fprintf(&buffer, "-%s", v.PreRelease)
+ }
+
+ if v.Metadata != "" {
+ fmt.Fprintf(&buffer, "+%s", v.Metadata)
+ }
+
+ return buffer.String()
+}
+
+func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var data string
+ if err := unmarshal(&data); err != nil {
+ return err
+ }
+ return v.Set(data)
+}
+
+func (v Version) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + v.String() + `"`), nil
+}
+
+func (v *Version) UnmarshalJSON(data []byte) error {
+ l := len(data)
+ if l == 0 || string(data) == `""` {
+ return nil
+ }
+ if l < 2 || data[0] != '"' || data[l-1] != '"' {
+ return errors.New("invalid semver string")
+ }
+ return v.Set(string(data[1 : l-1]))
+}
+
+// Compare tests if v is less than, equal to, or greater than versionB,
+// returning -1, 0, or +1 respectively.
+func (v Version) Compare(versionB Version) int {
+ if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
+ return cmp
+ }
+ return preReleaseCompare(v, versionB)
+}
+
+// Equal tests if v is equal to versionB.
+func (v Version) Equal(versionB Version) bool {
+ return v.Compare(versionB) == 0
+}
+
+// LessThan tests if v is less than versionB.
+func (v Version) LessThan(versionB Version) bool {
+ return v.Compare(versionB) < 0
+}
+
+// Slice converts the comparable parts of the semver into a slice of integers.
+func (v Version) Slice() []int64 {
+ return []int64{v.Major, v.Minor, v.Patch}
+}
+
+func (p PreRelease) Slice() []string {
+ preRelease := string(p)
+ return strings.Split(preRelease, ".")
+}
+
+func preReleaseCompare(versionA Version, versionB Version) int {
+ a := versionA.PreRelease
+ b := versionB.PreRelease
+
+ /* Handle the case where if two versions are otherwise equal it is the
+ * one without a PreRelease that is greater */
+ if len(a) == 0 && (len(b) > 0) {
+ return 1
+ } else if len(b) == 0 && (len(a) > 0) {
+ return -1
+ }
+
+ // If there is a prerelease, check and compare each part.
+ return recursivePreReleaseCompare(a.Slice(), b.Slice())
+}
+
+func recursiveCompare(versionA []int64, versionB []int64) int {
+ if len(versionA) == 0 {
+ return 0
+ }
+
+ a := versionA[0]
+ b := versionB[0]
+
+ if a > b {
+ return 1
+ } else if a < b {
+ return -1
+ }
+
+ return recursiveCompare(versionA[1:], versionB[1:])
+}
+
+func recursivePreReleaseCompare(versionA []string, versionB []string) int {
+ // A larger set of pre-release fields has a higher precedence than a smaller set,
+ // if all of the preceding identifiers are equal.
+ if len(versionA) == 0 {
+ if len(versionB) > 0 {
+ return -1
+ }
+ return 0
+ } else if len(versionB) == 0 {
+ // We're longer than versionB so return 1.
+ return 1
+ }
+
+ a := versionA[0]
+ b := versionB[0]
+
+ aInt := false
+ bInt := false
+
+ aI, err := strconv.Atoi(versionA[0])
+ if err == nil {
+ aInt = true
+ }
+
+ bI, err := strconv.Atoi(versionB[0])
+ if err == nil {
+ bInt = true
+ }
+
+ // Numeric identifiers always have lower precedence than non-numeric identifiers.
+ if aInt && !bInt {
+ return -1
+ } else if !aInt && bInt {
+ return 1
+ }
+
+ // Handle Integer Comparison
+ if aInt && bInt {
+ if aI > bI {
+ return 1
+ } else if aI < bI {
+ return -1
+ }
+ }
+
+ // Handle String Comparison
+ if a > b {
+ return 1
+ } else if a < b {
+ return -1
+ }
+
+ return recursivePreReleaseCompare(versionA[1:], versionB[1:])
+}
+
+// BumpMajor increments the Major field by 1 and resets all other fields to their default values
+func (v *Version) BumpMajor() {
+ v.Major += 1
+ v.Minor = 0
+ v.Patch = 0
+ v.PreRelease = PreRelease("")
+ v.Metadata = ""
+}
+
+// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
+func (v *Version) BumpMinor() {
+ v.Minor += 1
+ v.Patch = 0
+ v.PreRelease = PreRelease("")
+ v.Metadata = ""
+}
+
+// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
+func (v *Version) BumpPatch() {
+ v.Patch += 1
+ v.PreRelease = PreRelease("")
+ v.Metadata = ""
+}
+
+// validateIdentifier makes sure the provided identifier satisfies semver spec
+func validateIdentifier(id string) error {
+ if id != "" && !reIdentifier.MatchString(id) {
+ return fmt.Errorf("%s is not a valid semver identifier", id)
+ }
+ return nil
+}
+
+// reIdentifier is a regular expression used to check that pre-release and metadata
+// identifiers satisfy the spec requirements
+var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go
new file mode 100644
index 000000000000..e256b41a5ddf
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/semver/sort.go
@@ -0,0 +1,38 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semver
+
+import (
+ "sort"
+)
+
+type Versions []*Version
+
+func (s Versions) Len() int {
+ return len(s)
+}
+
+func (s Versions) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s Versions) Less(i, j int) bool {
+ return s[i].LessThan(*s[j])
+}
+
+// Sort sorts the given slice of Version
+func Sort(versions []*Version) {
+ sort.Sort(Versions(versions))
+}
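Editor's note: the newly vendored go-semver package compares versions numerically rather than lexically, which matters once a patch number passes .9. A minimal standalone sketch of the API added above, using made-up version strings:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	a := semver.New("1.17.2+k3s1") // New wraps NewVersion and panics on bad input
	b, err := semver.NewVersion("1.17.10-rc1")
	if err != nil {
		panic(err)
	}
	fmt.Println(a.LessThan(*b)) // true: patch 2 < 10 numerically, not lexically

	versions := []*semver.Version{b, a}
	semver.Sort(versions)
	fmt.Println(versions[0], versions[1]) // 1.17.2+k3s1 1.17.10-rc1
}
```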
diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml
new file mode 100644
index 000000000000..ba95cdd15c31
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/.travis.yml
@@ -0,0 +1,21 @@
+sudo: false
+language: go
+go:
+ - 1.3.x
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - master
+matrix:
+ allow_failures:
+ - go: master
+ fast_finish: true
+install:
+ - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+ - go get -t -v ./...
+ - diff -u <(echo -n) <(gofmt -d -s .)
+ - go tool vet .
+ - go test -v -race ./...
diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE
new file mode 100644
index 000000000000..8d9a94a90680
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) 2005-2008 Dustin Sallings
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown
new file mode 100644
index 000000000000..91b4ae56464b
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/README.markdown
@@ -0,0 +1,124 @@
+# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
+
+Just a few functions for helping humanize times and sizes.
+
+`go get` it as `github.com/dustin/go-humanize`, import it as
+`"github.com/dustin/go-humanize"`, use it as `humanize`.
+
+See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
+complete documentation.
+
+## Sizes
+
+This lets you take numbers like `82854982` and convert them to useful
+strings like `83 MB` or `79 MiB` (whichever you prefer).
+
+Example:
+
+```go
+fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
+```
+
+## Times
+
+This lets you take a `time.Time` and spit it out in relative terms.
+For example, `12 seconds ago` or `3 days from now`.
+
+Example:
+
+```go
+fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
+```
+
+Thanks to Kyle Lemons for the time implementation from an IRC
+conversation one day. It's pretty neat.
+
+## Ordinals
+
+From a [mailing list discussion][odisc] where a user wanted to be able
+to label ordinals.
+
+ 0 -> 0th
+ 1 -> 1st
+ 2 -> 2nd
+ 3 -> 3rd
+ 4 -> 4th
+ [...]
+
+Example:
+
+```go
+fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
+```
+
+## Commas
+
+Want to shove commas into numbers? Be my guest.
+
+ 0 -> 0
+ 100 -> 100
+ 1000 -> 1,000
+ 1000000000 -> 1,000,000,000
+ -100000 -> -100,000
+
+Example:
+
+```go
+fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
+```
+
+## Ftoa
+
+Nicer float64 formatter that removes trailing zeros.
+
+```go
+fmt.Printf("%f", 2.24) // 2.240000
+fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
+fmt.Printf("%f", 2.0) // 2.000000
+fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
+```
+
+## SI notation
+
+Format numbers with [SI notation][sinotation].
+
+Example:
+
+```go
+humanize.SI(0.00000000223, "M") // 2.23 nM
+```
+
+## English-specific functions
+
+The following functions are in the `humanize/english` subpackage.
+
+### Plurals
+
+Simple English pluralization
+
+```go
+english.PluralWord(1, "object", "") // object
+english.PluralWord(42, "object", "") // objects
+english.PluralWord(2, "bus", "") // buses
+english.PluralWord(99, "locus", "loci") // loci
+
+english.Plural(1, "object", "") // 1 object
+english.Plural(42, "object", "") // 42 objects
+english.Plural(2, "bus", "") // 2 buses
+english.Plural(99, "locus", "loci") // 99 loci
+```
+
+### Word series
+
+Format comma-separated word lists with conjunctions:
+
+```go
+english.WordSeries([]string{"foo"}, "and") // foo
+english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
+english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
+
+english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
+```
+
+[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
+[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go
new file mode 100644
index 000000000000..f49dc337dcd7
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/big.go
@@ -0,0 +1,31 @@
+package humanize
+
+import (
+ "math/big"
+)
+
+// order of magnitude (to a max order)
+func oomm(n, b *big.Int, maxmag int) (float64, int) {
+ mag := 0
+ m := &big.Int{}
+ for n.Cmp(b) >= 0 {
+ n.DivMod(n, b, m)
+ mag++
+ if mag == maxmag && maxmag >= 0 {
+ break
+ }
+ }
+ return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
+
+// total order of magnitude
+// (same as above, but with no upper limit)
+func oom(n, b *big.Int) (float64, int) {
+ mag := 0
+ m := &big.Int{}
+ for n.Cmp(b) >= 0 {
+ n.DivMod(n, b, m)
+ mag++
+ }
+ return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
+}
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go
new file mode 100644
index 000000000000..1a2bf6172392
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bigbytes.go
@@ -0,0 +1,173 @@
+package humanize
+
+import (
+ "fmt"
+ "math/big"
+ "strings"
+ "unicode"
+)
+
+var (
+ bigIECExp = big.NewInt(1024)
+
+ // BigByte is one byte in big.Ints
+ BigByte = big.NewInt(1)
+ // BigKiByte is 1,024 bytes in big.Ints
+ BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
+ // BigMiByte is 1,024 k bytes in big.Ints
+ BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
+ // BigGiByte is 1,024 m bytes in big.Ints
+ BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
+ // BigTiByte is 1,024 g bytes in big.Ints
+ BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
+ // BigPiByte is 1,024 t bytes in big.Ints
+ BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
+ // BigEiByte is 1,024 p bytes in big.Ints
+ BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
+ // BigZiByte is 1,024 e bytes in big.Ints
+ BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
+ // BigYiByte is 1,024 z bytes in big.Ints
+ BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
+)
+
+var (
+ bigSIExp = big.NewInt(1000)
+
+ // BigSIByte is one SI byte in big.Ints
+ BigSIByte = big.NewInt(1)
+ // BigKByte is 1,000 SI bytes in big.Ints
+ BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
+ // BigMByte is 1,000 SI k bytes in big.Ints
+ BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
+ // BigGByte is 1,000 SI m bytes in big.Ints
+ BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
+ // BigTByte is 1,000 SI g bytes in big.Ints
+ BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
+ // BigPByte is 1,000 SI t bytes in big.Ints
+ BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
+ // BigEByte is 1,000 SI p bytes in big.Ints
+ BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
+ // BigZByte is 1,000 SI e bytes in big.Ints
+ BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
+ // BigYByte is 1,000 SI z bytes in big.Ints
+ BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
+)
+
+var bigBytesSizeTable = map[string]*big.Int{
+ "b": BigByte,
+ "kib": BigKiByte,
+ "kb": BigKByte,
+ "mib": BigMiByte,
+ "mb": BigMByte,
+ "gib": BigGiByte,
+ "gb": BigGByte,
+ "tib": BigTiByte,
+ "tb": BigTByte,
+ "pib": BigPiByte,
+ "pb": BigPByte,
+ "eib": BigEiByte,
+ "eb": BigEByte,
+ "zib": BigZiByte,
+ "zb": BigZByte,
+ "yib": BigYiByte,
+ "yb": BigYByte,
+ // Without suffix
+ "": BigByte,
+ "ki": BigKiByte,
+ "k": BigKByte,
+ "mi": BigMiByte,
+ "m": BigMByte,
+ "gi": BigGiByte,
+ "g": BigGByte,
+ "ti": BigTiByte,
+ "t": BigTByte,
+ "pi": BigPiByte,
+ "p": BigPByte,
+ "ei": BigEiByte,
+ "e": BigEByte,
+ "z": BigZByte,
+ "zi": BigZiByte,
+ "y": BigYByte,
+ "yi": BigYiByte,
+}
+
+var ten = big.NewInt(10)
+
+func humanateBigBytes(s, base *big.Int, sizes []string) string {
+ if s.Cmp(ten) < 0 {
+ return fmt.Sprintf("%d B", s)
+ }
+ c := (&big.Int{}).Set(s)
+ val, mag := oomm(c, base, len(sizes)-1)
+ suffix := sizes[mag]
+ f := "%.0f %s"
+ if val < 10 {
+ f = "%.1f %s"
+ }
+
+ return fmt.Sprintf(f, val, suffix)
+
+}
+
+// BigBytes produces a human readable representation of an SI size.
+//
+// See also: ParseBigBytes.
+//
+// BigBytes(82854982) -> 83 MB
+func BigBytes(s *big.Int) string {
+ sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+ return humanateBigBytes(s, bigSIExp, sizes)
+}
+
+// BigIBytes produces a human readable representation of an IEC size.
+//
+// See also: ParseBigBytes.
+//
+// BigIBytes(82854982) -> 79 MiB
+func BigIBytes(s *big.Int) string {
+ sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+ return humanateBigBytes(s, bigIECExp, sizes)
+}
+
+// ParseBigBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See also: BigBytes, BigIBytes.
+//
+// ParseBigBytes("42 MB") -> 42000000, nil
+// ParseBigBytes("42 mib") -> 44040192, nil
+func ParseBigBytes(s string) (*big.Int, error) {
+ lastDigit := 0
+ hasComma := false
+ for _, r := range s {
+ if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+ break
+ }
+ if r == ',' {
+ hasComma = true
+ }
+ lastDigit++
+ }
+
+ num := s[:lastDigit]
+ if hasComma {
+ num = strings.Replace(num, ",", "", -1)
+ }
+
+ val := &big.Rat{}
+ _, err := fmt.Sscanf(num, "%f", val)
+ if err != nil {
+ return nil, err
+ }
+
+ extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+ if m, ok := bigBytesSizeTable[extra]; ok {
+ mv := (&big.Rat{}).SetInt(m)
+ val.Mul(val, mv)
+ rv := &big.Int{}
+ rv.Div(val.Num(), val.Denom())
+ return rv, nil
+ }
+
+ return nil, fmt.Errorf("unhandled size name: %v", extra)
+}
diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go
new file mode 100644
index 000000000000..0b498f4885c5
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/bytes.go
@@ -0,0 +1,143 @@
+package humanize
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+// IEC Sizes.
+// kibis of bits
+const (
+ Byte = 1 << (iota * 10)
+ KiByte
+ MiByte
+ GiByte
+ TiByte
+ PiByte
+ EiByte
+)
+
+// SI Sizes.
+const (
+ IByte = 1
+ KByte = IByte * 1000
+ MByte = KByte * 1000
+ GByte = MByte * 1000
+ TByte = GByte * 1000
+ PByte = TByte * 1000
+ EByte = PByte * 1000
+)
+
+var bytesSizeTable = map[string]uint64{
+ "b": Byte,
+ "kib": KiByte,
+ "kb": KByte,
+ "mib": MiByte,
+ "mb": MByte,
+ "gib": GiByte,
+ "gb": GByte,
+ "tib": TiByte,
+ "tb": TByte,
+ "pib": PiByte,
+ "pb": PByte,
+ "eib": EiByte,
+ "eb": EByte,
+ // Without suffix
+ "": Byte,
+ "ki": KiByte,
+ "k": KByte,
+ "mi": MiByte,
+ "m": MByte,
+ "gi": GiByte,
+ "g": GByte,
+ "ti": TiByte,
+ "t": TByte,
+ "pi": PiByte,
+ "p": PByte,
+ "ei": EiByte,
+ "e": EByte,
+}
+
+func logn(n, b float64) float64 {
+ return math.Log(n) / math.Log(b)
+}
+
+func humanateBytes(s uint64, base float64, sizes []string) string {
+ if s < 10 {
+ return fmt.Sprintf("%d B", s)
+ }
+ e := math.Floor(logn(float64(s), base))
+ suffix := sizes[int(e)]
+ val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
+ f := "%.0f %s"
+ if val < 10 {
+ f = "%.1f %s"
+ }
+
+ return fmt.Sprintf(f, val, suffix)
+}
+
+// Bytes produces a human readable representation of an SI size.
+//
+// See also: ParseBytes.
+//
+// Bytes(82854982) -> 83 MB
+func Bytes(s uint64) string {
+ sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
+ return humanateBytes(s, 1000, sizes)
+}
+
+// IBytes produces a human readable representation of an IEC size.
+//
+// See also: ParseBytes.
+//
+// IBytes(82854982) -> 79 MiB
+func IBytes(s uint64) string {
+ sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
+ return humanateBytes(s, 1024, sizes)
+}
+
+// ParseBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See Also: Bytes, IBytes.
+//
+// ParseBytes("42 MB") -> 42000000, nil
+// ParseBytes("42 mib") -> 44040192, nil
+func ParseBytes(s string) (uint64, error) {
+ lastDigit := 0
+ hasComma := false
+ for _, r := range s {
+ if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+ break
+ }
+ if r == ',' {
+ hasComma = true
+ }
+ lastDigit++
+ }
+
+ num := s[:lastDigit]
+ if hasComma {
+ num = strings.Replace(num, ",", "", -1)
+ }
+
+ f, err := strconv.ParseFloat(num, 64)
+ if err != nil {
+ return 0, err
+ }
+
+ extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+ if m, ok := bytesSizeTable[extra]; ok {
+ f *= float64(m)
+ if f >= math.MaxUint64 {
+ return 0, fmt.Errorf("too large: %v", s)
+ }
+ return uint64(f), nil
+ }
+
+ return 0, fmt.Errorf("unhandled size name: %v", extra)
+}
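Editor's note: Bytes/IBytes and ParseBytes round-trip between the SI (base-1000) and IEC (base-1024) tables defined above, and parsing lowercases the suffix before lookup. A small usage sketch of the vendored functions:

```go
package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	fmt.Println(humanize.Bytes(82854982))  // 83 MB  (SI, base 1000)
	fmt.Println(humanize.IBytes(82854982)) // 79 MiB (IEC, base 1024)

	n, err := humanize.ParseBytes("42 MiB") // suffix lookup is case-insensitive
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 44040192
}
```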
diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go
new file mode 100644
index 000000000000..520ae3e57d92
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/comma.go
@@ -0,0 +1,116 @@
+package humanize
+
+import (
+ "bytes"
+ "math"
+ "math/big"
+ "strconv"
+ "strings"
+)
+
+// Comma produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Comma(834142) -> 834,142
+func Comma(v int64) string {
+ sign := ""
+
+ // Min int64 can't be negated to a usable value, so it has to be special cased.
+ if v == math.MinInt64 {
+ return "-9,223,372,036,854,775,808"
+ }
+
+ if v < 0 {
+ sign = "-"
+ v = 0 - v
+ }
+
+ parts := []string{"", "", "", "", "", "", ""}
+ j := len(parts) - 1
+
+ for v > 999 {
+ parts[j] = strconv.FormatInt(v%1000, 10)
+ switch len(parts[j]) {
+ case 2:
+ parts[j] = "0" + parts[j]
+ case 1:
+ parts[j] = "00" + parts[j]
+ }
+ v = v / 1000
+ j--
+ }
+ parts[j] = strconv.Itoa(int(v))
+ return sign + strings.Join(parts[j:], ",")
+}
+
+// Commaf produces a string form of the given number in base 10 with
+// commas after every three orders of magnitude.
+//
+// e.g. Commaf(834142.32) -> 834,142.32
+func Commaf(v float64) string {
+ buf := &bytes.Buffer{}
+ if v < 0 {
+ buf.Write([]byte{'-'})
+ v = 0 - v
+ }
+
+ comma := []byte{','}
+
+ parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
+ pos := 0
+ if len(parts[0])%3 != 0 {
+ pos += len(parts[0]) % 3
+ buf.WriteString(parts[0][:pos])
+ buf.Write(comma)
+ }
+ for ; pos < len(parts[0]); pos += 3 {
+ buf.WriteString(parts[0][pos : pos+3])
+ buf.Write(comma)
+ }
+ buf.Truncate(buf.Len() - 1)
+
+ if len(parts) > 1 {
+ buf.Write([]byte{'.'})
+ buf.WriteString(parts[1])
+ }
+ return buf.String()
+}
+
+// CommafWithDigits works like Commaf but limits the resulting
+// string to the given number of decimal places.
+//
+// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
+func CommafWithDigits(f float64, decimals int) string {
+ return stripTrailingDigits(Commaf(f), decimals)
+}
+
+// BigComma produces a string form of the given big.Int in base 10
+// with commas after every three orders of magnitude.
+func BigComma(b *big.Int) string {
+ sign := ""
+ if b.Sign() < 0 {
+ sign = "-"
+ b.Abs(b)
+ }
+
+ athousand := big.NewInt(1000)
+ c := (&big.Int{}).Set(b)
+ _, m := oom(c, athousand)
+ parts := make([]string, m+1)
+ j := len(parts) - 1
+
+ mod := &big.Int{}
+ for b.Cmp(athousand) >= 0 {
+ b.DivMod(b, athousand, mod)
+ parts[j] = strconv.FormatInt(mod.Int64(), 10)
+ switch len(parts[j]) {
+ case 2:
+ parts[j] = "0" + parts[j]
+ case 1:
+ parts[j] = "00" + parts[j]
+ }
+ j--
+ }
+ parts[j] = strconv.Itoa(int(b.Int64()))
+ return sign + strings.Join(parts[j:], ",")
+}
diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go
new file mode 100644
index 000000000000..620690dec7dd
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/commaf.go
@@ -0,0 +1,40 @@
+// +build go1.6
+
+package humanize
+
+import (
+ "bytes"
+ "math/big"
+ "strings"
+)
+
+// BigCommaf produces a string form of the given big.Float in base 10
+// with commas after every three orders of magnitude.
+func BigCommaf(v *big.Float) string {
+ buf := &bytes.Buffer{}
+ if v.Sign() < 0 {
+ buf.Write([]byte{'-'})
+ v.Abs(v)
+ }
+
+ comma := []byte{','}
+
+ parts := strings.Split(v.Text('f', -1), ".")
+ pos := 0
+ if len(parts[0])%3 != 0 {
+ pos += len(parts[0]) % 3
+ buf.WriteString(parts[0][:pos])
+ buf.Write(comma)
+ }
+ for ; pos < len(parts[0]); pos += 3 {
+ buf.WriteString(parts[0][pos : pos+3])
+ buf.Write(comma)
+ }
+ buf.Truncate(buf.Len() - 1)
+
+ if len(parts) > 1 {
+ buf.Write([]byte{'.'})
+ buf.WriteString(parts[1])
+ }
+ return buf.String()
+}
diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go
new file mode 100644
index 000000000000..1c62b640d47c
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ftoa.go
@@ -0,0 +1,46 @@
+package humanize
+
+import (
+ "strconv"
+ "strings"
+)
+
+func stripTrailingZeros(s string) string {
+ offset := len(s) - 1
+ for offset > 0 {
+ if s[offset] == '.' {
+ offset--
+ break
+ }
+ if s[offset] != '0' {
+ break
+ }
+ offset--
+ }
+ return s[:offset+1]
+}
+
+func stripTrailingDigits(s string, digits int) string {
+ if i := strings.Index(s, "."); i >= 0 {
+ if digits <= 0 {
+ return s[:i]
+ }
+ i++
+ if i+digits >= len(s) {
+ return s
+ }
+ return s[:i+digits]
+ }
+ return s
+}
+
+// Ftoa converts a float to a string with no trailing zeros.
+func Ftoa(num float64) string {
+ return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
+}
+
+// FtoaWithDigits converts a float to a string but limits the resulting string
+// to the given number of decimal places, and no trailing zeros.
+func FtoaWithDigits(num float64, digits int) string {
+ return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
+}
diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go
new file mode 100644
index 000000000000..a2c2da31ef1a
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/humanize.go
@@ -0,0 +1,8 @@
+/*
+Package humanize converts boring ugly numbers to human-friendly strings and back.
+
+Durations can be turned into strings such as "3 days ago", numbers
+representing sizes like 82854982 into useful strings like "83 MB" or
+"79 MiB" (whichever you prefer).
+*/
+package humanize
diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go
new file mode 100644
index 000000000000..dec618659969
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/number.go
@@ -0,0 +1,192 @@
+package humanize
+
+/*
+Slightly adapted from the source to fit go-humanize.
+
+Author: https://github.com/gorhill
+Source: https://gist.github.com/gorhill/5285193
+
+*/
+
+import (
+ "math"
+ "strconv"
+)
+
+var (
+ renderFloatPrecisionMultipliers = [...]float64{
+ 1,
+ 10,
+ 100,
+ 1000,
+ 10000,
+ 100000,
+ 1000000,
+ 10000000,
+ 100000000,
+ 1000000000,
+ }
+
+ renderFloatPrecisionRounders = [...]float64{
+ 0.5,
+ 0.05,
+ 0.005,
+ 0.0005,
+ 0.00005,
+ 0.000005,
+ 0.0000005,
+ 0.00000005,
+ 0.000000005,
+ 0.0000000005,
+ }
+)
+
+// FormatFloat produces a formatted number as string based on the following user-specified criteria:
+// * thousands separator
+// * decimal separator
+// * decimal precision
+//
+// Usage: s := FormatFloat(format, n)
+// The format parameter tells how to render the number n.
+//
+// See examples: http://play.golang.org/p/LXc1Ddm1lJ
+//
+// Examples of format strings, given n = 12345.6789:
+// "#,###.##" => "12,345.67"
+// "#,###." => "12,345"
+// "#,###" => "12345,678"
+// "#\u202F###,##" => "12 345,68"
+// "#.###,###### => 12.345,678900
+// "" (aka default format) => 12,345.67
+//
+// The highest precision allowed is 9 digits after the decimal symbol.
+// There is also a version for integer numbers, FormatInteger(),
+// which is convenient for calls within templates.
+func FormatFloat(format string, n float64) string {
+ // Special cases:
+ // NaN = "NaN"
+ // +Inf = "+Infinity"
+ // -Inf = "-Infinity"
+ if math.IsNaN(n) {
+ return "NaN"
+ }
+ if n > math.MaxFloat64 {
+ return "Infinity"
+ }
+ if n < -math.MaxFloat64 {
+ return "-Infinity"
+ }
+
+ // default format
+ precision := 2
+ decimalStr := "."
+ thousandStr := ","
+ positiveStr := ""
+ negativeStr := "-"
+
+ if len(format) > 0 {
+ format := []rune(format)
+
+ // If there is an explicit format directive,
+ // then default values are these:
+ precision = 9
+ thousandStr = ""
+
+ // collect indices of meaningful formatting directives
+ formatIndx := []int{}
+ for i, char := range format {
+ if char != '#' && char != '0' {
+ formatIndx = append(formatIndx, i)
+ }
+ }
+
+ if len(formatIndx) > 0 {
+ // Directive at index 0:
+ // Must be a '+'
+ // Raise an error if not the case
+ // index: 0123456789
+ // +0.000,000
+ // +000,000.0
+ // +0000.00
+ // +0000
+ if formatIndx[0] == 0 {
+ if format[formatIndx[0]] != '+' {
+ panic("RenderFloat(): invalid positive sign directive")
+ }
+ positiveStr = "+"
+ formatIndx = formatIndx[1:]
+ }
+
+ // Two directives:
+ // First is thousands separator
+ // Raise an error if not followed by 3-digit
+ // 0123456789
+ // 0.000,000
+ // 000,000.00
+ if len(formatIndx) == 2 {
+ if (formatIndx[1] - formatIndx[0]) != 4 {
+ panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers")
+ }
+ thousandStr = string(format[formatIndx[0]])
+ formatIndx = formatIndx[1:]
+ }
+
+ // One directive:
+ // Directive is decimal separator
+ // The number of digit-specifier following the separator indicates wanted precision
+ // 0123456789
+ // 0.00
+ // 000,0000
+ if len(formatIndx) == 1 {
+ decimalStr = string(format[formatIndx[0]])
+ precision = len(format) - formatIndx[0] - 1
+ }
+ }
+ }
+
+ // generate sign part
+ var signStr string
+ if n >= 0.000000001 {
+ signStr = positiveStr
+ } else if n <= -0.000000001 {
+ signStr = negativeStr
+ n = -n
+ } else {
+ signStr = ""
+ n = 0.0
+ }
+
+ // split number into integer and fractional parts
+ intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision])
+
+ // generate integer part string
+ intStr := strconv.FormatInt(int64(intf), 10)
+
+ // add thousand separator if required
+ if len(thousandStr) > 0 {
+ for i := len(intStr); i > 3; {
+ i -= 3
+ intStr = intStr[:i] + thousandStr + intStr[i:]
+ }
+ }
+
+ // no fractional part, we can leave now
+ if precision == 0 {
+ return signStr + intStr
+ }
+
+ // generate fractional part
+ fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision]))
+ // may need padding
+ if len(fracStr) < precision {
+ fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr
+ }
+
+ return signStr + intStr + decimalStr + fracStr
+}
+
+// FormatInteger produces a formatted number as string.
+// See FormatFloat.
+func FormatInteger(format string, n int) string {
+ return FormatFloat(format, float64(n))
+}
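Editor's note: the format-string parsing above is easiest to see by example. At most one leading '+' sign directive is allowed, an optional thousands separator must be followed by exactly three digit specifiers, and the digit count after the decimal separator sets the precision. A short usage sketch (output comments reflect the code's rounding behavior):

```go
package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	n := 1234567.891
	fmt.Println(humanize.FormatFloat("#,###.##", n)) // 1,234,567.89
	fmt.Println(humanize.FormatFloat("#,###.", n))   // 1,234,568 (precision 0, rounded)
	fmt.Println(humanize.FormatFloat("", n))         // 1,234,567.89 (default format)

	fmt.Println(humanize.FormatInteger("#,###.", 1234567)) // 1,234,567
}
```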
diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go
new file mode 100644
index 000000000000..43d88a861950
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/ordinals.go
@@ -0,0 +1,25 @@
+package humanize
+
+import "strconv"
+
+// Ordinal gives you the input number in a rank/ordinal format.
+//
+// Ordinal(3) -> 3rd
+func Ordinal(x int) string {
+ suffix := "th"
+ switch x % 10 {
+ case 1:
+ if x%100 != 11 {
+ suffix = "st"
+ }
+ case 2:
+ if x%100 != 12 {
+ suffix = "nd"
+ }
+ case 3:
+ if x%100 != 13 {
+ suffix = "rd"
+ }
+ }
+ return strconv.Itoa(x) + suffix
+}
diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go
new file mode 100644
index 000000000000..ae659e0e4979
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/si.go
@@ -0,0 +1,123 @@
+package humanize
+
+import (
+ "errors"
+ "math"
+ "regexp"
+ "strconv"
+)
+
+var siPrefixTable = map[float64]string{
+ -24: "y", // yocto
+ -21: "z", // zepto
+ -18: "a", // atto
+ -15: "f", // femto
+ -12: "p", // pico
+ -9: "n", // nano
+ -6: "µ", // micro
+ -3: "m", // milli
+ 0: "",
+ 3: "k", // kilo
+ 6: "M", // mega
+ 9: "G", // giga
+ 12: "T", // tera
+ 15: "P", // peta
+ 18: "E", // exa
+ 21: "Z", // zetta
+ 24: "Y", // yotta
+}
+
+var revSIPrefixTable = revfmap(siPrefixTable)
+
+// revfmap reverses the map and precomputes the power multiplier
+func revfmap(in map[float64]string) map[string]float64 {
+ rv := map[string]float64{}
+ for k, v := range in {
+ rv[v] = math.Pow(10, k)
+ }
+ return rv
+}
+
+var riParseRegex *regexp.Regexp
+
+func init() {
+ ri := `^([\-0-9.]+)\s?([`
+ for _, v := range siPrefixTable {
+ ri += v
+ }
+ ri += `]?)(.*)`
+
+ riParseRegex = regexp.MustCompile(ri)
+}
+
+// ComputeSI finds the most appropriate SI prefix for the given number
+// and returns the prefix along with the value adjusted to be within
+// that prefix.
+//
+// See also: SI, ParseSI.
+//
+// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
+func ComputeSI(input float64) (float64, string) {
+ if input == 0 {
+ return 0, ""
+ }
+ mag := math.Abs(input)
+ exponent := math.Floor(logn(mag, 10))
+ exponent = math.Floor(exponent/3) * 3
+
+ value := mag / math.Pow(10, exponent)
+
+ // Handle special case where value is exactly 1000.0
+ // Should return 1 M instead of 1000 k
+ if value == 1000.0 {
+ exponent += 3
+ value = mag / math.Pow(10, exponent)
+ }
+
+ value = math.Copysign(value, input)
+
+ prefix := siPrefixTable[exponent]
+ return value, prefix
+}
+
+// SI returns a string with default formatting.
+//
+// SI uses Ftoa to format float value, removing trailing zeros.
+//
+// See also: ComputeSI, ParseSI.
+//
+// e.g. SI(1000000, "B") -> 1 MB
+// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
+func SI(input float64, unit string) string {
+ value, prefix := ComputeSI(input)
+ return Ftoa(value) + " " + prefix + unit
+}
+
+// SIWithDigits works like SI but limits the resulting string to the
+// given number of decimal places.
+//
+// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
+// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
+func SIWithDigits(input float64, decimals int, unit string) string {
+ value, prefix := ComputeSI(input)
+ return FtoaWithDigits(value, decimals) + " " + prefix + unit
+}
+
+var errInvalid = errors.New("invalid input")
+
+// ParseSI parses an SI string back into the number and unit.
+//
+// See also: SI, ComputeSI.
+//
+// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
+func ParseSI(input string) (float64, string, error) {
+ found := riParseRegex.FindStringSubmatch(input)
+ if len(found) != 4 {
+ return 0, "", errInvalid
+ }
+ mag := revSIPrefixTable[found[2]]
+ unit := found[3]
+
+ base, err := strconv.ParseFloat(found[1], 64)
+ return base * mag, unit, err
+}
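Editor's note: ComputeSI normalizes a value into the nearest power-of-1000 band (with a special case so 1000 k becomes 1 M), and ParseSI inverts it. A tiny sketch reusing the examples from the doc comments above:

```go
package main

import (
	"fmt"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	v, prefix := humanize.ComputeSI(2.2345e-12)
	fmt.Println(v, prefix) // 2.2345 p

	fmt.Println(humanize.SI(1000000, "B")) // 1 MB

	f, unit, err := humanize.ParseSI("2.2345 pF")
	fmt.Println(f, unit, err) // 2.2345e-12 F <nil>
}
```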
diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go
new file mode 100644
index 000000000000..dd3fbf5efc0c
--- /dev/null
+++ b/vendor/github.com/dustin/go-humanize/times.go
@@ -0,0 +1,117 @@
+package humanize
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "time"
+)
+
+// Seconds-based time units
+const (
+ Day = 24 * time.Hour
+ Week = 7 * Day
+ Month = 30 * Day
+ Year = 12 * Month
+ LongTime = 37 * Year
+)
+
+// Time formats a time into a relative string.
+//
+// Time(someT) -> "3 weeks ago"
+func Time(then time.Time) string {
+ return RelTime(then, time.Now(), "ago", "from now")
+}
+
+// A RelTimeMagnitude struct contains a relative time point at which
+// the relative format of time will switch to a new format string. A
+// slice of these in ascending order by their "D" field is passed to
+// CustomRelTime to format durations.
+//
+// The Format field is a string that may contain a "%s" which will be
+// replaced with the appropriate signed label (e.g. "ago" or "from
+// now") and a "%d" that will be replaced by the quantity.
+//
+// The DivBy field is the amount of time the time difference must be
+// divided by in order to display correctly.
+//
+// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
+// DivBy should be time.Minute so whatever the duration is will be
+// expressed in minutes.
+type RelTimeMagnitude struct {
+ D time.Duration
+ Format string
+ DivBy time.Duration
+}
+
+var defaultMagnitudes = []RelTimeMagnitude{
+ {time.Second, "now", time.Second},
+ {2 * time.Second, "1 second %s", 1},
+ {time.Minute, "%d seconds %s", time.Second},
+ {2 * time.Minute, "1 minute %s", 1},
+ {time.Hour, "%d minutes %s", time.Minute},
+ {2 * time.Hour, "1 hour %s", 1},
+ {Day, "%d hours %s", time.Hour},
+ {2 * Day, "1 day %s", 1},
+ {Week, "%d days %s", Day},
+ {2 * Week, "1 week %s", 1},
+ {Month, "%d weeks %s", Week},
+ {2 * Month, "1 month %s", 1},
+ {Year, "%d months %s", Month},
+ {18 * Month, "1 year %s", 1},
+ {2 * Year, "2 years %s", 1},
+ {LongTime, "%d years %s", Year},
+ {math.MaxInt64, "a long while %s", 1},
+}
+
+// RelTime formats a time into a relative string.
+//
+// It takes two times and two labels. In addition to the generic time
+// delta string (e.g. 5 minutes), the labels are applied so that
+// the label corresponding to the smaller time is used.
+//
+// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
+func RelTime(a, b time.Time, albl, blbl string) string {
+ return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
+}
+
+// CustomRelTime formats a time into a relative string.
+//
+// It takes two times, two labels, and a table of relative time formats.
+// In addition to the generic time delta string (e.g. 5 minutes), the
+// labels are applied so that the label corresponding to the smaller
+// time is used.
+func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
+ lbl := albl
+ diff := b.Sub(a)
+
+ if a.After(b) {
+ lbl = blbl
+ diff = a.Sub(b)
+ }
+
+ n := sort.Search(len(magnitudes), func(i int) bool {
+ return magnitudes[i].D > diff
+ })
+
+ if n >= len(magnitudes) {
+ n = len(magnitudes) - 1
+ }
+ mag := magnitudes[n]
+ args := []interface{}{}
+ escaped := false
+ for _, ch := range mag.Format {
+ if escaped {
+ switch ch {
+ case 's':
+ args = append(args, lbl)
+ case 'd':
+ args = append(args, diff/mag.DivBy)
+ }
+ escaped = false
+ } else {
+ escaped = ch == '%'
+ }
+ }
+ return fmt.Sprintf(mag.Format, args...)
+}
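Editor's note: RelTime picks the first magnitude whose D exceeds the delta and fills the %d/%s verbs in its Format string, so a custom table only needs to be sorted ascending by D. A small sketch using the defaults plus an assumed custom table:

```go
package main

import (
	"fmt"
	"math"
	"time"

	humanize "github.com/dustin/go-humanize"
)

func main() {
	now := time.Now()
	fmt.Println(humanize.Time(now.Add(-3 * humanize.Week)))                        // 3 weeks ago
	fmt.Println(humanize.RelTime(now.Add(45*time.Minute), now, "ago", "from now")) // 45 minutes from now

	// Custom table: anything under a day is just "today".
	mags := []humanize.RelTimeMagnitude{
		{D: humanize.Day, Format: "today", DivBy: time.Second},
		{D: math.MaxInt64, Format: "%d days %s", DivBy: humanize.Day},
	}
	fmt.Println(humanize.CustomRelTime(now.Add(-5*time.Hour), now, "ago", "from now", mags)) // today
}
```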
diff --git a/vendor/github.com/flosch/pongo2/.gitattributes b/vendor/github.com/flosch/pongo2/.gitattributes
deleted file mode 100644
index fcadb2cf9791..000000000000
--- a/vendor/github.com/flosch/pongo2/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-* text eol=lf
diff --git a/vendor/github.com/flosch/pongo2/.travis.yml b/vendor/github.com/flosch/pongo2/.travis.yml
deleted file mode 100644
index e39e5d05286e..000000000000
--- a/vendor/github.com/flosch/pongo2/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-os:
- - linux
- - osx
-go:
- - 1.12
-script:
- - go test -v
diff --git a/vendor/github.com/flosch/pongo2/AUTHORS b/vendor/github.com/flosch/pongo2/AUTHORS
deleted file mode 100644
index 601697cfa91a..000000000000
--- a/vendor/github.com/flosch/pongo2/AUTHORS
+++ /dev/null
@@ -1,11 +0,0 @@
-Main author and maintainer of pongo2:
-
-* Florian Schlachter
-
-Contributors (in no specific order):
-
-* @romanoaugusto88
-* @vitalbh
-* @blaubaer
-
-Feel free to add yourself to the list or to modify your entry if you did a contribution.
diff --git a/vendor/github.com/flosch/pongo2/LICENSE b/vendor/github.com/flosch/pongo2/LICENSE
deleted file mode 100644
index e876f869054c..000000000000
--- a/vendor/github.com/flosch/pongo2/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013-2014 Florian Schlachter
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/flosch/pongo2/README.md b/vendor/github.com/flosch/pongo2/README.md
deleted file mode 100644
index f70f50254769..000000000000
--- a/vendor/github.com/flosch/pongo2/README.md
+++ /dev/null
@@ -1,273 +0,0 @@
-# [pongo](https://en.wikipedia.org/wiki/Pongo_%28genus%29)2
-
-[![Join the chat at https://gitter.im/flosch/pongo2](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/flosch/pongo2)
-[![GoDoc](https://godoc.org/github.com/flosch/pongo2?status.svg)](https://godoc.org/github.com/flosch/pongo2)
-[![Build Status](https://travis-ci.org/flosch/pongo2.svg?branch=master)](https://travis-ci.org/flosch/pongo2)
-[![Backers on Open Collective](https://opencollective.com/pongo2/backers/badge.svg)](#backers)
-[![Sponsors on Open Collective](https://opencollective.com/pongo2/sponsors/badge.svg)](#sponsors)
-
-pongo2 is the successor of [pongo](https://github.com/flosch/pongo), a Django-syntax like templating-language.
-
-Install/update using `go get` (no dependencies required by pongo2):
-```
-go get -u github.com/flosch/pongo2
-```
-
-Please use the [issue tracker](https://github.com/flosch/pongo2/issues) if you're encountering any problems with pongo2 or if you need help with implementing tags or filters ([create a ticket!](https://github.com/flosch/pongo2/issues/new)).
-
-## First impression of a template
-
-```HTML+Django
-<html><head><title>Our admins and users</title></head>
-{# This is a short example to give you a quick overview of pongo2's syntax. #}
-
-{% macro user_details(user, is_admin=false) %}
-	<div class="user_item">
-		<!-- Let's indicate a user's good karma -->
-		<h2 {% if (user.is_admin || user.karma >= 40) ||
-			(user.karma > calc_avg_karma(userlist)+5) %}
-			class="karma-good"{% endif %}>
-
-			<!-- This will call user.String() automatically if available: -->
-			{{ user }}
-		</h2>
-
-		<!-- Will print a human-readable time duration like "3 weeks ago" -->
-		<p>This user registered {{ user.register_date|naturaltime }}.</p>
-
-		<!-- Show a markdown preview of the user's biography -->
-		<p>The user's biography:</p>
-		<p>{{ user.biography|markdown|truncatewords_html:15 }}
-			<a href="/user/{{ user.id }}/">read more</a></p>
-
-		{% if is_admin %}<p>This user is an admin!</p>{% endif %}
-	</div>
-{% endmacro %}
-
-<body>
-	<!-- Make use of the macro defined above for admins and members alike -->
-	<h1>Our admins</h1>
-	{% for admin in adminlist %}
-		{{ user_details(admin, true) }}
-	{% endfor %}
-
-	<h1>Our members</h1>
-	{% for user in userlist %}
-		{{ user_details(user) }}
-	{% endfor %}
-</body>
-</html>
-```
-
-## Development status
-
-**Latest stable release**: v3.0 (`go get -u gopkg.in/flosch/pongo2.v3` / [`v3`](https://github.com/flosch/pongo2/tree/v3)-branch)
-
-**Current development**: v4 (`master`-branch)
-
-*Note*: With the release of pongo v4 the branch v2 will be deprecated.
-
-**Deprecated versions** (not supported anymore): v1
-
-| Topic | Status |
-| ------------------------------------ | -------------------------------------------------------------------------------------- |
-| Django version compatibility: | [1.7](https://docs.djangoproject.com/en/1.7/ref/templates/builtins/) |
-| *Missing* (planned) **filters**: | none ([hints](https://github.com/flosch/pongo2/blob/master/filters_builtin.go#L3)) |
-| *Missing* (planned) **tags**: | none ([hints](https://github.com/flosch/pongo2/blob/master/tags.go#L3)) |
-
-Please also have a look on the [caveats](https://github.com/flosch/pongo2#caveats) and on the [official add-ons](https://github.com/flosch/pongo2#official).
-
-## Features (and new in pongo2)
-
- * Entirely rewritten from the ground-up.
- * [Advanced C-like expressions](https://github.com/flosch/pongo2/blob/master/template_tests/expressions.tpl).
- * [Complex function calls within expressions](https://github.com/flosch/pongo2/blob/master/template_tests/function_calls_wrapper.tpl).
- * [Easy API to create new filters and tags](http://godoc.org/github.com/flosch/pongo2#RegisterFilter) ([including parsing arguments](http://godoc.org/github.com/flosch/pongo2#Parser))
- * Additional features:
- * Macros including importing macros from other files (see [template_tests/macro.tpl](https://github.com/flosch/pongo2/blob/master/template_tests/macro.tpl))
- * [Template sandboxing](https://godoc.org/github.com/flosch/pongo2#TemplateSet) ([directory patterns](http://golang.org/pkg/path/filepath/#Match), banned tags/filters)
-
-## Recent API changes within pongo2
-
-If you're using the `master`-branch of pongo2, you might be interested in this section. Since pongo2 is still in development (even though there is a first stable release!), there could be (backwards-incompatible) API changes over time. To keep track of these and therefore make it painless for you to adapt your codebase, I'll list them here.
-
- * Function signature for tag execution changed: not taking a `bytes.Buffer` anymore; instead `Execute()`-functions are now taking a `TemplateWriter` interface.
- * Function signature for tag and filter parsing/execution changed (`error` return type changed to `*Error`).
- * `INodeEvaluator` has been removed and got replaced by `IEvaluator`. You can change your existing tags/filters by simply replacing the interface.
- * Two new helper functions: [`RenderTemplateFile()`](https://godoc.org/github.com/flosch/pongo2#RenderTemplateFile) and [`RenderTemplateString()`](https://godoc.org/github.com/flosch/pongo2#RenderTemplateString).
- * `Template.ExecuteRW()` is now [`Template.ExecuteWriter()`](https://godoc.org/github.com/flosch/pongo2#Template.ExecuteWriter)
- * `Template.Execute*()` functions do now take a `pongo2.Context` directly (no pointer anymore).
-
-## How you can help
-
- * Write [filters](https://github.com/flosch/pongo2/blob/master/filters_builtin.go#L3) / [tags] by forking pongo2 and sending pull requests
- * Write/improve code tests (use the following command to see what tests are missing: `go test -v -cover -covermode=count -coverprofile=cover.out && go tool cover -html=cover.out` or have a look on [gocover.io/github.com/flosch/pongo2](http://gocover.io/github.com/flosch/pongo2))
- * Write/improve template tests (see the `template_tests/` directory)
- * Write middleware, libraries and websites using pongo2. :-)
-
-# Documentation
-
-For a documentation on how the templating language works you can [head over to the Django documentation](https://docs.djangoproject.com/en/dev/topics/templates/). pongo2 aims to be compatible with it.
-
-You can access pongo2's API documentation on [godoc](https://godoc.org/github.com/flosch/pongo2).
-
-## Caveats
-
-### Filters
-
- * **date** / **time**: The `date` and `time` filter are taking the Golang specific time- and date-format (not Django's one) currently. [Take a look on the format here](http://golang.org/pkg/time/#Time.Format).
- * **stringformat**: `stringformat` does **not** take Python's string format syntax as a parameter, instead it takes Go's. Essentially `{{ 3.14|stringformat:"pi is %.2f" }}` is `fmt.Sprintf("pi is %.2f", 3.14)`.
- * **escape** / **force_escape**: Unlike Django's behaviour, the `escape`-filter is applied immediately. Therefore there is no need for a `force_escape`-filter yet.
-
-### Tags
-
- * **for**: All the `forloop` fields (like `forloop.counter`) are written with a capital letter at the beginning. For example, the `counter` can be accessed by `forloop.Counter` and the parentloop by `forloop.Parentloop`.
- * **now**: takes Go's time format (see **date** and **time**-filter).
-
-### Misc
-
- * **not in-operator**: You can check whether a map/struct/string contains a key/field/substring by using the in-operator (or the negation of it):
- `{% if key in map %}Key is in map{% else %}Key not in map{% endif %}` or `{% if !(key in map) %}Key is NOT in map{% else %}Key is in map{% endif %}`.
-
-# Add-ons, libraries and helpers
-
-## Official
-
- * [ponginae](https://github.com/flosch/ponginae) - A web-framework for Go (using pongo2).
- * [pongo2-tools](https://github.com/flosch/pongo2-tools) - Official tools and helpers for pongo2
- * [pongo2-addons](https://github.com/flosch/pongo2-addons) - Official additional filters/tags for pongo2 (for example a **markdown**-filter). They are in their own repository because they're relying on 3rd-party-libraries.
-
-## 3rd-party
-
- * [beego-pongo2](https://github.com/oal/beego-pongo2) - A tiny little helper for using Pongo2 with [Beego](https://github.com/astaxie/beego).
- * [beego-pongo2.v2](https://github.com/ipfans/beego-pongo2.v2) - Same as `beego-pongo2`, but for pongo2 v2.
- * [macaron-pongo2](https://github.com/macaron-contrib/pongo2) - pongo2 support for [Macaron](https://github.com/Unknwon/macaron), a modular web framework.
- * [ginpongo2](https://github.com/ngerakines/ginpongo2) - middleware for [gin](github.com/gin-gonic/gin) to use pongo2 templates
- * [Build'n support for Iris' template engine](https://github.com/kataras/iris)
- * [pongo2gin](https://gitlab.com/go-box/pongo2gin) - alternative renderer for [gin](github.com/gin-gonic/gin) to use pongo2 templates
- * [pongo2-trans](https://github.com/digitalcrab/pongo2trans) - `trans`-tag implementation for internationalization
- * [tpongo2](https://github.com/tango-contrib/tpongo2) - pongo2 support for [Tango](https://github.com/lunny/tango), a micro-kernel & pluggable web framework.
- * [p2cli](https://github.com/wrouesnel/p2cli) - command line templating utility based on pongo2
-
-Please add your project to this list and send me a pull request when you've developed something nice for pongo2.
-
-# API-usage examples
-
-Please see the documentation for a full list of provided API methods.
-
-## A tiny example (template string)
-
-```Go
-// Compile the template first (i. e. creating the AST)
-tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
-if err != nil {
- panic(err)
-}
-// Now you can render the template with the given
-// pongo2.Context how often you want to.
-out, err := tpl.Execute(pongo2.Context{"name": "florian"})
-if err != nil {
- panic(err)
-}
-fmt.Println(out) // Output: Hello Florian!
-```
-
-## Example server-usage (template file)
-
-```Go
-package main
-
-import (
- "github.com/flosch/pongo2"
- "net/http"
-)
-
-// Pre-compiling the templates at application startup using the
-// little Must()-helper function (Must() will panic if FromFile()
-// or FromString() will return with an error - that's it).
-// It's faster to pre-compile it anywhere at startup and only
-// execute the template later.
-var tplExample = pongo2.Must(pongo2.FromFile("example.html"))
-
-func examplePage(w http.ResponseWriter, r *http.Request) {
- // Execute the template per HTTP request
- err := tplExample.ExecuteWriter(pongo2.Context{"query": r.FormValue("query")}, w)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- }
-}
-
-func main() {
- http.HandleFunc("/", examplePage)
- http.ListenAndServe(":8080", nil)
-}
-```
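-
-A note on this design: `ExecuteWriter` buffers the rendered template and writes it to the `http.ResponseWriter` only on success, so an execution error should not leave a half-written response behind; if the intermediate buffer is a concern, pongo2 also offers an unbuffered variant (`ExecuteWriterUnbuffered`).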
-
-# Benchmark
-
-The benchmarks have been run on my machine (`Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz`) using the command:
-
- go test -bench . -cpu 1,2,4,8
-
-Depending on the benchmark, the `template_tests/complex.tpl` template is compiled, executed, or both.
-
-The results are:
-
- BenchmarkExecuteComplexWithSandboxActive 50000 60450 ns/op
- BenchmarkExecuteComplexWithSandboxActive-2 50000 56998 ns/op
- BenchmarkExecuteComplexWithSandboxActive-4 50000 60343 ns/op
- BenchmarkExecuteComplexWithSandboxActive-8 50000 64229 ns/op
- BenchmarkCompileAndExecuteComplexWithSandboxActive 10000 164410 ns/op
- BenchmarkCompileAndExecuteComplexWithSandboxActive-2 10000 156682 ns/op
- BenchmarkCompileAndExecuteComplexWithSandboxActive-4 10000 164821 ns/op
- BenchmarkCompileAndExecuteComplexWithSandboxActive-8 10000 171806 ns/op
- BenchmarkParallelExecuteComplexWithSandboxActive 50000 60428 ns/op
- BenchmarkParallelExecuteComplexWithSandboxActive-2 50000 31887 ns/op
- BenchmarkParallelExecuteComplexWithSandboxActive-4 100000 22810 ns/op
- BenchmarkParallelExecuteComplexWithSandboxActive-8 100000 18820 ns/op
- BenchmarkExecuteComplexWithoutSandbox 50000 56942 ns/op
- BenchmarkExecuteComplexWithoutSandbox-2 50000 56168 ns/op
- BenchmarkExecuteComplexWithoutSandbox-4 50000 57838 ns/op
- BenchmarkExecuteComplexWithoutSandbox-8 50000 60539 ns/op
- BenchmarkCompileAndExecuteComplexWithoutSandbox 10000 162086 ns/op
- BenchmarkCompileAndExecuteComplexWithoutSandbox-2 10000 159771 ns/op
- BenchmarkCompileAndExecuteComplexWithoutSandbox-4 10000 163826 ns/op
- BenchmarkCompileAndExecuteComplexWithoutSandbox-8 10000 169062 ns/op
- BenchmarkParallelExecuteComplexWithoutSandbox 50000 57152 ns/op
- BenchmarkParallelExecuteComplexWithoutSandbox-2 50000 30276 ns/op
- BenchmarkParallelExecuteComplexWithoutSandbox-4 100000 22065 ns/op
- BenchmarkParallelExecuteComplexWithoutSandbox-8 100000 18034 ns/op
-
-Benchmarked on October 2nd 2014.
-
-## Contributors
-
-This project exists thanks to all the people who contribute.
-
-## Backers
-
-Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/pongo2#backer)]
-
-## Sponsors
-
-Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/pongo2#sponsor)]
-
diff --git a/vendor/github.com/flosch/pongo2/context.go b/vendor/github.com/flosch/pongo2/context.go
deleted file mode 100644
index 2934d70a497f..000000000000
--- a/vendor/github.com/flosch/pongo2/context.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package pongo2
-
-import (
- "regexp"
-
- "github.com/juju/errors"
-)
-
-var reIdentifiers = regexp.MustCompile("^[a-zA-Z0-9_]+$")
-
-var autoescape = true
-
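-// SetAutoescape overrides the package-wide default for autoescaping.
-// It only affects execution contexts created afterwards, so call it
-// before rendering rather than concurrently with it.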
-func SetAutoescape(newValue bool) {
- autoescape = newValue
-}
-
-// A Context type provides constants, variables, instances or functions to a template.
-//
-// pongo2 automatically provides meta-information or functions through the "pongo2"-key.
-// Currently, context["pongo2"] contains the following keys:
-// 1. version: returns the version string
-//
-// Template examples for accessing items from your context:
-// {{ myconstant }}
-// {{ myfunc("test", 42) }}
-// {{ user.name }}
-// {{ pongo2.version }}
-type Context map[string]interface{}
-
-func (c Context) checkForValidIdentifiers() *Error {
- for k, v := range c {
- if !reIdentifiers.MatchString(k) {
- return &Error{
- Sender: "checkForValidIdentifiers",
- OrigError: errors.Errorf("context-key '%s' (value: '%+v') is not a valid identifier", k, v),
- }
- }
- }
- return nil
-}
-
-// Update updates this context with the key/value-pairs from another context.
-func (c Context) Update(other Context) Context {
- for k, v := range other {
- c[k] = v
- }
- return c
-}
-
-// ExecutionContext contains all data important for the current rendering state.
-//
-// If you're writing a custom tag, your tag's Execute()-function will
-// have access to the ExecutionContext. This struct stores anything
-// about the current rendering process's Context including
-// the Context provided by the user (field Public).
-// You can safely use the Private context to provide data to the user's
-// template (like a 'forloop'-information). The Shared-context is used
-// to share data between tags. All ExecutionContexts share this context.
-//
-// Please be careful when accessing the Public data.
-// PLEASE DO NOT MODIFY THE PUBLIC CONTEXT (read-only).
-//
-// To create your own execution context within tags, use the
-// NewChildExecutionContext(parent) function.
-type ExecutionContext struct {
- template *Template
-
- Autoescape bool
- Public Context
- Private Context
- Shared Context
-}
-
-var pongo2MetaContext = Context{
- "version": Version,
-}
-
-func newExecutionContext(tpl *Template, ctx Context) *ExecutionContext {
- privateCtx := make(Context)
-
- // Make the pongo2-related funcs/vars available to the context
- privateCtx["pongo2"] = pongo2MetaContext
-
- return &ExecutionContext{
- template: tpl,
-
- Public: ctx,
- Private: privateCtx,
- Autoescape: autoescape,
- }
-}
-
-func NewChildExecutionContext(parent *ExecutionContext) *ExecutionContext {
- newctx := &ExecutionContext{
- template: parent.template,
-
- Public: parent.Public,
- Private: make(Context),
- Autoescape: parent.Autoescape,
- }
- newctx.Shared = parent.Shared
-
- // Copy all existing private items
- newctx.Private.Update(parent.Private)
-
- return newctx
-}
-
-func (ctx *ExecutionContext) Error(msg string, token *Token) *Error {
- return ctx.OrigError(errors.New(msg), token)
-}
-
-func (ctx *ExecutionContext) OrigError(err error, token *Token) *Error {
- filename := ctx.template.name
- var line, col int
- if token != nil {
- // No tokens available
- // TODO: Add location (from where?)
- filename = token.Filename
- line = token.Line
- col = token.Col
- }
- return &Error{
- Template: ctx.template,
- Filename: filename,
- Line: line,
- Column: col,
- Token: token,
- Sender: "execution",
- OrigError: err,
- }
-}
-
-func (ctx *ExecutionContext) Logf(format string, args ...interface{}) {
- ctx.template.set.logf(format, args...)
-}
diff --git a/vendor/github.com/flosch/pongo2/doc.go b/vendor/github.com/flosch/pongo2/doc.go
deleted file mode 100644
index 5a23e2b2d842..000000000000
--- a/vendor/github.com/flosch/pongo2/doc.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// A Django-syntax-like template engine.
-//
-// Blog posts about pongo2 (including introduction and migration):
-// https://www.florian-schlachter.de/?tag=pongo2
-//
-// Complete documentation on the template language:
-// https://docs.djangoproject.com/en/dev/topics/templates/
-//
-// Try out pongo2 live in the pongo2 playground:
-// https://www.florian-schlachter.de/pongo2/
-//
-// Make sure to read README.md in the repository as well.
-//
-// A tiny example with template strings:
-//
-// (Snippet on playground: https://www.florian-schlachter.de/pongo2/?id=1206546277)
-//
-// // Compile the template first (i. e. creating the AST)
-// tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
-// if err != nil {
-// panic(err)
-// }
-// // Now you can render the template with the given
-// // pongo2.Context how often you want to.
-// out, err := tpl.Execute(pongo2.Context{"name": "fred"})
-// if err != nil {
-// panic(err)
-// }
-// fmt.Println(out) // Output: Hello Fred!
-//
-package pongo2
diff --git a/vendor/github.com/flosch/pongo2/error.go b/vendor/github.com/flosch/pongo2/error.go
deleted file mode 100644
index 8aec8c10034d..000000000000
--- a/vendor/github.com/flosch/pongo2/error.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package pongo2
-
-import (
- "bufio"
- "fmt"
- "os"
-)
-
-// The Error type is being used to address an error during lexing, parsing or
-// execution. If you want to return an error object (for example in your own
-// tag or filter) fill this object with as much information as you have.
-// Make sure "Sender" is always given (if you're returning an error within
-// a filter, make Sender equals 'filter:yourfilter'; same goes for tags: 'tag:mytag').
-// It's okay if you only fill in OrigError if you don't have any other details at hand.
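-//
-// For example, from inside a custom filter (a sketch; the filter name is
-// illustrative):
-//
-//	return nil, &pongo2.Error{
-//		Sender:    "filter:myfilter",
-//		OrigError: errors.New("unexpected input"),
-//	}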
-type Error struct {
- Template *Template
- Filename string
- Line int
- Column int
- Token *Token
- Sender string
- OrigError error
-}
-
-func (e *Error) updateFromTokenIfNeeded(template *Template, t *Token) *Error {
- if e.Template == nil {
- e.Template = template
- }
-
- if e.Token == nil {
- e.Token = t
- if e.Line <= 0 {
- e.Line = t.Line
- e.Column = t.Col
- }
- }
-
- return e
-}
-
-// Error returns a nicely formatted error string.
-func (e *Error) Error() string {
- s := "[Error"
- if e.Sender != "" {
- s += " (where: " + e.Sender + ")"
- }
- if e.Filename != "" {
- s += " in " + e.Filename
- }
- if e.Line > 0 {
- s += fmt.Sprintf(" | Line %d Col %d", e.Line, e.Column)
- if e.Token != nil {
- s += fmt.Sprintf(" near '%s'", e.Token.Val)
- }
- }
- s += "] "
- s += e.OrigError.Error()
- return s
-}
-
-// RawLine returns the affected line from the original template, if available.
-func (e *Error) RawLine() (line string, available bool, outErr error) {
- if e.Line <= 0 || e.Filename == "" {
- return "", false, nil
- }
-
- filename := e.Filename
- if e.Template != nil {
- filename = e.Template.set.resolveFilename(e.Template, e.Filename)
- }
- file, err := os.Open(filename)
- if err != nil {
- return "", false, err
- }
- defer func() {
- err := file.Close()
- if err != nil && outErr == nil {
- outErr = err
- }
- }()
-
- scanner := bufio.NewScanner(file)
- l := 0
- for scanner.Scan() {
- l++
- if l == e.Line {
- return scanner.Text(), true, nil
- }
- }
- return "", false, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/filters.go b/vendor/github.com/flosch/pongo2/filters.go
deleted file mode 100644
index 1092705b0bf6..000000000000
--- a/vendor/github.com/flosch/pongo2/filters.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package pongo2
-
-import (
- "fmt"
-
- "github.com/juju/errors"
-)
-
-// FilterFunction is the type filter functions must fulfil
-type FilterFunction func(in *Value, param *Value) (out *Value, err *Error)
-
-var filters map[string]FilterFunction
-
-func init() {
- filters = make(map[string]FilterFunction)
-}
-
-// FilterExists returns true if the given filter is already registered
-func FilterExists(name string) bool {
- _, existing := filters[name]
- return existing
-}
-
-// RegisterFilter registers a new filter. If there's already a filter with the same
-// name, RegisterFilter returns an error. You usually want to call this
-// function in the filter's init() function:
-// http://golang.org/doc/effective_go.html#init
-//
-// See http://www.florian-schlachter.de/post/pongo2/ for more about
-// writing filters and tags.
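-//
-// A minimal sketch of a custom filter (the name "reverse" is illustrative,
-// not part of the library):
-//
-//	func init() {
-//		pongo2.RegisterFilter("reverse", func(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
-//			runes := []rune(in.String())
-//			for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 {
-//				runes[i], runes[j] = runes[j], runes[i]
-//			}
-//			return pongo2.AsValue(string(runes)), nil
-//		})
-//	}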
-func RegisterFilter(name string, fn FilterFunction) error {
- if FilterExists(name) {
- return errors.Errorf("filter with name '%s' is already registered", name)
- }
- filters[name] = fn
- return nil
-}
-
-// ReplaceFilter replaces an already registered filter with a new implementation. Use this
-// function with caution since it allows you to change existing filter behaviour.
-func ReplaceFilter(name string, fn FilterFunction) error {
- if !FilterExists(name) {
- return errors.Errorf("filter with name '%s' does not exist (therefore cannot be overridden)", name)
- }
- filters[name] = fn
- return nil
-}
-
-// MustApplyFilter behaves like ApplyFilter, but panics on an error.
-func MustApplyFilter(name string, value *Value, param *Value) *Value {
- val, err := ApplyFilter(name, value, param)
- if err != nil {
- panic(err)
- }
- return val
-}
-
-// ApplyFilter applies a filter to a given value using the given parameters.
-// Returns a *pongo2.Value or an error.
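-//
-// For example, a sketch using the builtin "upper" filter:
-//
-//	val, err := pongo2.ApplyFilter("upper", pongo2.AsValue("hello"), nil)
-//	// on success, val.String() == "HELLO"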
-func ApplyFilter(name string, value *Value, param *Value) (*Value, *Error) {
- fn, existing := filters[name]
- if !existing {
- return nil, &Error{
- Sender: "applyfilter",
- OrigError: errors.Errorf("Filter with name '%s' not found.", name),
- }
- }
-
- // Make sure param is a *Value
- if param == nil {
- param = AsValue(nil)
- }
-
- return fn(value, param)
-}
-
-type filterCall struct {
- token *Token
-
- name string
- parameter IEvaluator
-
- filterFunc FilterFunction
-}
-
-func (fc *filterCall) Execute(v *Value, ctx *ExecutionContext) (*Value, *Error) {
- var param *Value
- var err *Error
-
- if fc.parameter != nil {
- param, err = fc.parameter.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- } else {
- param = AsValue(nil)
- }
-
- filteredValue, err := fc.filterFunc(v, param)
- if err != nil {
- return nil, err.updateFromTokenIfNeeded(ctx.template, fc.token)
- }
- return filteredValue, nil
-}
-
-// Filter = IDENT | IDENT ":" FilterArg | IDENT "|" Filter
-func (p *Parser) parseFilter() (*filterCall, *Error) {
- identToken := p.MatchType(TokenIdentifier)
-
- // Check filter ident
- if identToken == nil {
- return nil, p.Error("Filter name must be an identifier.", nil)
- }
-
- filter := &filterCall{
- token: identToken,
- name: identToken.Val,
- }
-
- // Get the appropriate filter function and bind it
- filterFn, exists := filters[identToken.Val]
- if !exists {
- return nil, p.Error(fmt.Sprintf("Filter '%s' does not exist.", identToken.Val), identToken)
- }
-
- filter.filterFunc = filterFn
-
- // Check for filter-argument (2 tokens needed: ':' ARG)
- if p.Match(TokenSymbol, ":") != nil {
- if p.Peek(TokenSymbol, "}}") != nil {
- return nil, p.Error("Filter parameter required after ':'.", nil)
- }
-
- // Get filter argument expression
- v, err := p.parseVariableOrLiteral()
- if err != nil {
- return nil, err
- }
- filter.parameter = v
- }
-
- return filter, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/filters_builtin.go b/vendor/github.com/flosch/pongo2/filters_builtin.go
deleted file mode 100644
index f02b4918ad27..000000000000
--- a/vendor/github.com/flosch/pongo2/filters_builtin.go
+++ /dev/null
@@ -1,927 +0,0 @@
-package pongo2
-
-/* Filters that are provided through github.com/flosch/pongo2-addons:
- ------------------------------------------------------------------
-
- filesizeformat
- slugify
- timesince
- timeuntil
-
- Filters that won't be added:
- ----------------------------
-
- get_static_prefix (reason: web-framework specific)
- pprint (reason: python-specific)
- static (reason: web-framework specific)
-
- Reconsideration (not implemented yet):
- --------------------------------------
-
- force_escape (reason: not yet needed since this is the behaviour of pongo2's escape filter)
- safeseq (reason: same reason as `force_escape`)
- unordered_list (python-specific; not sure whether needed or not)
- dictsort (python-specific; maybe one could add a filter to sort a list of structs by a specific field name)
- dictsortreversed (see dictsort)
-*/
-
-import (
- "bytes"
- "fmt"
- "math/rand"
- "net/url"
- "regexp"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-
- "github.com/juju/errors"
-)
-
-func init() {
- rand.Seed(time.Now().Unix())
-
- RegisterFilter("escape", filterEscape)
- RegisterFilter("safe", filterSafe)
- RegisterFilter("escapejs", filterEscapejs)
-
- RegisterFilter("add", filterAdd)
- RegisterFilter("addslashes", filterAddslashes)
- RegisterFilter("capfirst", filterCapfirst)
- RegisterFilter("center", filterCenter)
- RegisterFilter("cut", filterCut)
- RegisterFilter("date", filterDate)
- RegisterFilter("default", filterDefault)
- RegisterFilter("default_if_none", filterDefaultIfNone)
- RegisterFilter("divisibleby", filterDivisibleby)
- RegisterFilter("first", filterFirst)
- RegisterFilter("floatformat", filterFloatformat)
- RegisterFilter("get_digit", filterGetdigit)
- RegisterFilter("iriencode", filterIriencode)
- RegisterFilter("join", filterJoin)
- RegisterFilter("last", filterLast)
- RegisterFilter("length", filterLength)
- RegisterFilter("length_is", filterLengthis)
- RegisterFilter("linebreaks", filterLinebreaks)
- RegisterFilter("linebreaksbr", filterLinebreaksbr)
- RegisterFilter("linenumbers", filterLinenumbers)
- RegisterFilter("ljust", filterLjust)
- RegisterFilter("lower", filterLower)
- RegisterFilter("make_list", filterMakelist)
- RegisterFilter("phone2numeric", filterPhone2numeric)
- RegisterFilter("pluralize", filterPluralize)
- RegisterFilter("random", filterRandom)
- RegisterFilter("removetags", filterRemovetags)
- RegisterFilter("rjust", filterRjust)
- RegisterFilter("slice", filterSlice)
- RegisterFilter("split", filterSplit)
- RegisterFilter("stringformat", filterStringformat)
- RegisterFilter("striptags", filterStriptags)
- RegisterFilter("time", filterDate) // time uses filterDate (same golang-format)
- RegisterFilter("title", filterTitle)
- RegisterFilter("truncatechars", filterTruncatechars)
- RegisterFilter("truncatechars_html", filterTruncatecharsHTML)
- RegisterFilter("truncatewords", filterTruncatewords)
- RegisterFilter("truncatewords_html", filterTruncatewordsHTML)
- RegisterFilter("upper", filterUpper)
- RegisterFilter("urlencode", filterUrlencode)
- RegisterFilter("urlize", filterUrlize)
- RegisterFilter("urlizetrunc", filterUrlizetrunc)
- RegisterFilter("wordcount", filterWordcount)
- RegisterFilter("wordwrap", filterWordwrap)
- RegisterFilter("yesno", filterYesno)
-
- RegisterFilter("float", filterFloat) // pongo-specific
- RegisterFilter("integer", filterInteger) // pongo-specific
-}
-
-func filterTruncatecharsHelper(s string, newLen int) string {
- runes := []rune(s)
- if newLen < len(runes) {
- if newLen >= 3 {
- return fmt.Sprintf("%s...", string(runes[:newLen-3]))
- }
- // Not enough space for the ellipsis
- return string(runes[:newLen])
- }
- return string(runes)
-}
-
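-// filterTruncateHTMLHelper walks the input while maintaining a stack of open
-// HTML tags: cond() reports when enough text has been consumed, fn consumes
-// text starting at idx and returns the new index, and finalize() runs before
-// the still-open tags on the stack are closed.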
-func filterTruncateHTMLHelper(value string, newOutput *bytes.Buffer, cond func() bool, fn func(c rune, s int, idx int) int, finalize func()) {
- vLen := len(value)
- var tagStack []string
- idx := 0
-
- for idx < vLen && !cond() {
- c, s := utf8.DecodeRuneInString(value[idx:])
- if c == utf8.RuneError {
- idx += s
- continue
- }
-
- if c == '<' {
- newOutput.WriteRune(c)
- idx += s // consume "<"
-
- if idx+1 < vLen {
- if value[idx] == '/' {
- // Close tag
-
- newOutput.WriteString("/")
-
- tag := ""
- idx++ // consume "/"
-
- for idx < vLen {
- c2, size2 := utf8.DecodeRuneInString(value[idx:])
- if c2 == utf8.RuneError {
- idx += size2
- continue
- }
-
- // End of tag found
- if c2 == '>' {
- idx++ // consume ">"
- break
- }
- tag += string(c2)
- idx += size2
- }
-
- if len(tagStack) > 0 {
-					// Ideally, the close tag is at the TOP of the tag stack.
-					// In malformed HTML it may not be, so iterate through the stack and remove the tag
- for i := len(tagStack) - 1; i >= 0; i-- {
- if tagStack[i] == tag {
- // Found the tag
- tagStack[i] = tagStack[len(tagStack)-1]
- tagStack = tagStack[:len(tagStack)-1]
- break
- }
- }
- }
-
- newOutput.WriteString(tag)
- newOutput.WriteString(">")
- } else {
- // Open tag
-
- tag := ""
-
- params := false
- for idx < vLen {
- c2, size2 := utf8.DecodeRuneInString(value[idx:])
- if c2 == utf8.RuneError {
- idx += size2
- continue
- }
-
- newOutput.WriteRune(c2)
-
- // End of tag found
- if c2 == '>' {
- idx++ // consume ">"
- break
- }
-
- if !params {
- if c2 == ' ' {
- params = true
- } else {
- tag += string(c2)
- }
- }
-
- idx += size2
- }
-
- // Add tag to stack
- tagStack = append(tagStack, tag)
- }
- }
- } else {
- idx = fn(c, s, idx)
- }
- }
-
- finalize()
-
- for i := len(tagStack) - 1; i >= 0; i-- {
- tag := tagStack[i]
- // Close everything from the regular tag stack
-		newOutput.WriteString(fmt.Sprintf("</%s>", tag))
- }
-}
-
-func filterTruncatechars(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
- newLen := param.Integer()
- return AsValue(filterTruncatecharsHelper(s, newLen)), nil
-}
-
-func filterTruncatecharsHTML(in *Value, param *Value) (*Value, *Error) {
- value := in.String()
- newLen := max(param.Integer()-3, 0)
-
- newOutput := bytes.NewBuffer(nil)
-
- textcounter := 0
-
- filterTruncateHTMLHelper(value, newOutput, func() bool {
- return textcounter >= newLen
- }, func(c rune, s int, idx int) int {
- textcounter++
- newOutput.WriteRune(c)
-
- return idx + s
- }, func() {
- if textcounter >= newLen && textcounter < len(value) {
- newOutput.WriteString("...")
- }
- })
-
- return AsSafeValue(newOutput.String()), nil
-}
-
-func filterTruncatewords(in *Value, param *Value) (*Value, *Error) {
- words := strings.Fields(in.String())
- n := param.Integer()
- if n <= 0 {
- return AsValue(""), nil
- }
- nlen := min(len(words), n)
- out := make([]string, 0, nlen)
- for i := 0; i < nlen; i++ {
- out = append(out, words[i])
- }
-
- if n < len(words) {
- out = append(out, "...")
- }
-
- return AsValue(strings.Join(out, " ")), nil
-}
-
-func filterTruncatewordsHTML(in *Value, param *Value) (*Value, *Error) {
- value := in.String()
- newLen := max(param.Integer(), 0)
-
- newOutput := bytes.NewBuffer(nil)
-
- wordcounter := 0
-
- filterTruncateHTMLHelper(value, newOutput, func() bool {
- return wordcounter >= newLen
- }, func(_ rune, _ int, idx int) int {
- // Get next word
- wordFound := false
-
- for idx < len(value) {
- c2, size2 := utf8.DecodeRuneInString(value[idx:])
- if c2 == utf8.RuneError {
- idx += size2
- continue
- }
-
- if c2 == '<' {
- // HTML tag start, don't consume it
- return idx
- }
-
- newOutput.WriteRune(c2)
- idx += size2
-
- if c2 == ' ' || c2 == '.' || c2 == ',' || c2 == ';' {
- // Word ends here, stop capturing it now
- break
- } else {
- wordFound = true
- }
- }
-
- if wordFound {
- wordcounter++
- }
-
- return idx
- }, func() {
- if wordcounter >= newLen {
- newOutput.WriteString("...")
- }
- })
-
- return AsSafeValue(newOutput.String()), nil
-}
-
-func filterEscape(in *Value, param *Value) (*Value, *Error) {
-	output := strings.Replace(in.String(), "&", "&amp;", -1)
-	output = strings.Replace(output, ">", "&gt;", -1)
-	output = strings.Replace(output, "<", "&lt;", -1)
-	output = strings.Replace(output, "\"", "&quot;", -1)
-	output = strings.Replace(output, "'", "&#39;", -1)
- return AsValue(output), nil
-}
-
-func filterSafe(in *Value, param *Value) (*Value, *Error) {
- return in, nil // nothing to do here, just to keep track of the safe application
-}
-
-func filterEscapejs(in *Value, param *Value) (*Value, *Error) {
- sin := in.String()
-
- var b bytes.Buffer
-
- idx := 0
- for idx < len(sin) {
- c, size := utf8.DecodeRuneInString(sin[idx:])
- if c == utf8.RuneError {
- idx += size
- continue
- }
-
- if c == '\\' {
- // Escape seq?
- if idx+1 < len(sin) {
- switch sin[idx+1] {
- case 'r':
- b.WriteString(fmt.Sprintf(`\u%04X`, '\r'))
- idx += 2
- continue
- case 'n':
- b.WriteString(fmt.Sprintf(`\u%04X`, '\n'))
- idx += 2
- continue
- /*case '\'':
- b.WriteString(fmt.Sprintf(`\u%04X`, '\''))
- idx += 2
- continue
- case '"':
- b.WriteString(fmt.Sprintf(`\u%04X`, '"'))
- idx += 2
- continue*/
- }
- }
- }
-
- if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == ' ' || c == '/' {
- b.WriteRune(c)
- } else {
- b.WriteString(fmt.Sprintf(`\u%04X`, c))
- }
-
- idx += size
- }
-
- return AsValue(b.String()), nil
-}
-
-func filterAdd(in *Value, param *Value) (*Value, *Error) {
- if in.IsNumber() && param.IsNumber() {
- if in.IsFloat() || param.IsFloat() {
- return AsValue(in.Float() + param.Float()), nil
- }
- return AsValue(in.Integer() + param.Integer()), nil
- }
- // If in/param is not a number, we're relying on the
- // Value's String() conversion and just add them both together
- return AsValue(in.String() + param.String()), nil
-}
-
-func filterAddslashes(in *Value, param *Value) (*Value, *Error) {
- output := strings.Replace(in.String(), "\\", "\\\\", -1)
- output = strings.Replace(output, "\"", "\\\"", -1)
- output = strings.Replace(output, "'", "\\'", -1)
- return AsValue(output), nil
-}
-
-func filterCut(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.Replace(in.String(), param.String(), "", -1)), nil
-}
-
-func filterLength(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Len()), nil
-}
-
-func filterLengthis(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Len() == param.Integer()), nil
-}
-
-func filterDefault(in *Value, param *Value) (*Value, *Error) {
- if !in.IsTrue() {
- return param, nil
- }
- return in, nil
-}
-
-func filterDefaultIfNone(in *Value, param *Value) (*Value, *Error) {
- if in.IsNil() {
- return param, nil
- }
- return in, nil
-}
-
-func filterDivisibleby(in *Value, param *Value) (*Value, *Error) {
- if param.Integer() == 0 {
- return AsValue(false), nil
- }
- return AsValue(in.Integer()%param.Integer() == 0), nil
-}
-
-func filterFirst(in *Value, param *Value) (*Value, *Error) {
- if in.CanSlice() && in.Len() > 0 {
- return in.Index(0), nil
- }
- return AsValue(""), nil
-}
-
-func filterFloatformat(in *Value, param *Value) (*Value, *Error) {
- val := in.Float()
-
- decimals := -1
- if !param.IsNil() {
- // Any argument provided?
- decimals = param.Integer()
- }
-
-	// if the argument is not a number (e.g. empty), the default
-	// behaviour is to trim the result
- trim := !param.IsNumber()
-
- if decimals <= 0 {
- // argument is negative or zero, so we
- // want the output being trimmed
- decimals = -decimals
- trim = true
- }
-
- if trim {
- // Remove zeroes
- if float64(int(val)) == val {
- return AsValue(in.Integer()), nil
- }
- }
-
- return AsValue(strconv.FormatFloat(val, 'f', decimals, 64)), nil
-}
-
-func filterGetdigit(in *Value, param *Value) (*Value, *Error) {
- i := param.Integer()
- l := len(in.String()) // do NOT use in.Len() here!
- if i <= 0 || i > l {
- return in, nil
- }
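-	// subtracting '0' (ASCII 48) converts the digit byte to its numeric value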
- return AsValue(in.String()[l-i] - 48), nil
-}
-
-const filterIRIChars = "/#%[]=:;$&()+,!?*@'~"
-
-func filterIriencode(in *Value, param *Value) (*Value, *Error) {
- var b bytes.Buffer
-
- sin := in.String()
- for _, r := range sin {
- if strings.IndexRune(filterIRIChars, r) >= 0 {
- b.WriteRune(r)
- } else {
- b.WriteString(url.QueryEscape(string(r)))
- }
- }
-
- return AsValue(b.String()), nil
-}
-
-func filterJoin(in *Value, param *Value) (*Value, *Error) {
- if !in.CanSlice() {
- return in, nil
- }
- sep := param.String()
- sl := make([]string, 0, in.Len())
- for i := 0; i < in.Len(); i++ {
- sl = append(sl, in.Index(i).String())
- }
- return AsValue(strings.Join(sl, sep)), nil
-}
-
-func filterLast(in *Value, param *Value) (*Value, *Error) {
- if in.CanSlice() && in.Len() > 0 {
- return in.Index(in.Len() - 1), nil
- }
- return AsValue(""), nil
-}
-
-func filterUpper(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.ToUpper(in.String())), nil
-}
-
-func filterLower(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.ToLower(in.String())), nil
-}
-
-func filterMakelist(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
- result := make([]string, 0, len(s))
- for _, c := range s {
- result = append(result, string(c))
- }
- return AsValue(result), nil
-}
-
-func filterCapfirst(in *Value, param *Value) (*Value, *Error) {
- if in.Len() <= 0 {
- return AsValue(""), nil
- }
- t := in.String()
- r, size := utf8.DecodeRuneInString(t)
- return AsValue(strings.ToUpper(string(r)) + t[size:]), nil
-}
-
-func filterCenter(in *Value, param *Value) (*Value, *Error) {
- width := param.Integer()
- slen := in.Len()
- if width <= slen {
- return in, nil
- }
-
- spaces := width - slen
- left := spaces/2 + spaces%2
- right := spaces / 2
-
- return AsValue(fmt.Sprintf("%s%s%s", strings.Repeat(" ", left),
- in.String(), strings.Repeat(" ", right))), nil
-}
-
-func filterDate(in *Value, param *Value) (*Value, *Error) {
- t, isTime := in.Interface().(time.Time)
- if !isTime {
- return nil, &Error{
- Sender: "filter:date",
- OrigError: errors.New("filter input argument must be of type 'time.Time'"),
- }
- }
- return AsValue(t.Format(param.String())), nil
-}
-
-func filterFloat(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Float()), nil
-}
-
-func filterInteger(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Integer()), nil
-}
-
-func filterLinebreaks(in *Value, param *Value) (*Value, *Error) {
- if in.Len() == 0 {
- return in, nil
- }
-
- var b bytes.Buffer
-
-	// Newline = <br />
-	// Double newline = <p>...</p>
-	lines := strings.Split(in.String(), "\n")
-	lenlines := len(lines)
-
-	opened := false
-
-	for idx, line := range lines {
-
-		if !opened {
-			b.WriteString("<p>")
-			opened = true
-		}
-
-		b.WriteString(line)
-
-		if idx < lenlines-1 && strings.TrimSpace(lines[idx]) != "" {
-			// We've not reached the end
-			if strings.TrimSpace(lines[idx+1]) == "" {
-				// Next line is empty
-				if opened {
-					b.WriteString("</p>")
-					opened = false
-				}
-			} else {
-				b.WriteString("<br />")
-			}
-		}
-	}
-
-	if opened {
-		b.WriteString("</p>")
-	}
-
- return AsValue(b.String()), nil
-}
-
-func filterSplit(in *Value, param *Value) (*Value, *Error) {
- chunks := strings.Split(in.String(), param.String())
-
- return AsValue(chunks), nil
-}
-
-func filterLinebreaksbr(in *Value, param *Value) (*Value, *Error) {
-	return AsValue(strings.Replace(in.String(), "\n", "<br />", -1)), nil
-}
-
-func filterLinenumbers(in *Value, param *Value) (*Value, *Error) {
- lines := strings.Split(in.String(), "\n")
- output := make([]string, 0, len(lines))
- for idx, line := range lines {
- output = append(output, fmt.Sprintf("%d. %s", idx+1, line))
- }
- return AsValue(strings.Join(output, "\n")), nil
-}
-
-func filterLjust(in *Value, param *Value) (*Value, *Error) {
- times := param.Integer() - in.Len()
- if times < 0 {
- times = 0
- }
- return AsValue(fmt.Sprintf("%s%s", in.String(), strings.Repeat(" ", times))), nil
-}
-
-func filterUrlencode(in *Value, param *Value) (*Value, *Error) {
- return AsValue(url.QueryEscape(in.String())), nil
-}
-
-// TODO: This regexp could do some work
-var filterUrlizeURLRegexp = regexp.MustCompile(`((((http|https)://)|www\.|((^|[ ])[0-9A-Za-z_\-]+(\.com|\.net|\.org|\.info|\.biz|\.de))))(?U:.*)([ ]+|$)`)
-var filterUrlizeEmailRegexp = regexp.MustCompile(`(\w+@\w+\.\w{2,4})`)
-
-func filterUrlizeHelper(input string, autoescape bool, trunc int) (string, error) {
- var soutErr error
- sout := filterUrlizeURLRegexp.ReplaceAllStringFunc(input, func(raw_url string) string {
- var prefix string
- var suffix string
- if strings.HasPrefix(raw_url, " ") {
- prefix = " "
- }
- if strings.HasSuffix(raw_url, " ") {
- suffix = " "
- }
-
- raw_url = strings.TrimSpace(raw_url)
-
- t, err := ApplyFilter("iriencode", AsValue(raw_url), nil)
- if err != nil {
- soutErr = err
- return ""
- }
- url := t.String()
-
- if !strings.HasPrefix(url, "http") {
- url = fmt.Sprintf("http://%s", url)
- }
-
- title := raw_url
-
- if trunc > 3 && len(title) > trunc {
- title = fmt.Sprintf("%s...", title[:trunc-3])
- }
-
- if autoescape {
- t, err := ApplyFilter("escape", AsValue(title), nil)
- if err != nil {
- soutErr = err
- return ""
- }
- title = t.String()
- }
-
-		return fmt.Sprintf(`%s<a href="%s" rel="nofollow">%s</a>%s`, prefix, url, title, suffix)
- })
- if soutErr != nil {
- return "", soutErr
- }
-
- sout = filterUrlizeEmailRegexp.ReplaceAllStringFunc(sout, func(mail string) string {
- title := mail
-
- if trunc > 3 && len(title) > trunc {
- title = fmt.Sprintf("%s...", title[:trunc-3])
- }
-
-		return fmt.Sprintf(`<a href="mailto:%s">%s</a>`, mail, title)
- })
-
- return sout, nil
-}
-
-func filterUrlize(in *Value, param *Value) (*Value, *Error) {
- autoescape := true
- if param.IsBool() {
- autoescape = param.Bool()
- }
-
- s, err := filterUrlizeHelper(in.String(), autoescape, -1)
-	if err != nil {
-		// don't swallow the helper's error; report it like the other filters do
-		return nil, &Error{
-			Sender:    "filter:urlize",
-			OrigError: err,
-		}
-	}
-
- return AsValue(s), nil
-}
-
-func filterUrlizetrunc(in *Value, param *Value) (*Value, *Error) {
- s, err := filterUrlizeHelper(in.String(), true, param.Integer())
- if err != nil {
- return nil, &Error{
- Sender: "filter:urlizetrunc",
-			OrigError: err,
- }
- }
- return AsValue(s), nil
-}
-
-func filterStringformat(in *Value, param *Value) (*Value, *Error) {
- return AsValue(fmt.Sprintf(param.String(), in.Interface())), nil
-}
-
-var reStriptags = regexp.MustCompile("<[^>]*?>")
-
-func filterStriptags(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
-
- // Strip all tags
- s = reStriptags.ReplaceAllString(s, "")
-
- return AsValue(strings.TrimSpace(s)), nil
-}
-
-// https://en.wikipedia.org/wiki/Phoneword
-var filterPhone2numericMap = map[string]string{
- "a": "2", "b": "2", "c": "2", "d": "3", "e": "3", "f": "3", "g": "4", "h": "4", "i": "4", "j": "5", "k": "5",
- "l": "5", "m": "6", "n": "6", "o": "6", "p": "7", "q": "7", "r": "7", "s": "7", "t": "8", "u": "8", "v": "8",
- "w": "9", "x": "9", "y": "9", "z": "9",
-}
-
-func filterPhone2numeric(in *Value, param *Value) (*Value, *Error) {
- sin := in.String()
- for k, v := range filterPhone2numericMap {
- sin = strings.Replace(sin, k, v, -1)
- sin = strings.Replace(sin, strings.ToUpper(k), v, -1)
- }
- return AsValue(sin), nil
-}
-
-func filterPluralize(in *Value, param *Value) (*Value, *Error) {
- if in.IsNumber() {
- // Works only on numbers
- if param.Len() > 0 {
- endings := strings.Split(param.String(), ",")
- if len(endings) > 2 {
- return nil, &Error{
- Sender: "filter:pluralize",
- OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
- }
- }
- if len(endings) == 1 {
- // 1 argument
- if in.Integer() != 1 {
- return AsValue(endings[0]), nil
- }
- } else {
- if in.Integer() != 1 {
- // 2 arguments
- return AsValue(endings[1]), nil
- }
- return AsValue(endings[0]), nil
- }
- } else {
- if in.Integer() != 1 {
- // return default 's'
- return AsValue("s"), nil
- }
- }
-
- return AsValue(""), nil
- }
- return nil, &Error{
- Sender: "filter:pluralize",
-		OrigError: errors.New("filter 'pluralize' only works on numbers"),
- }
-}
-
-func filterRandom(in *Value, param *Value) (*Value, *Error) {
- if !in.CanSlice() || in.Len() <= 0 {
- return in, nil
- }
- i := rand.Intn(in.Len())
- return in.Index(i), nil
-}
-
-func filterRemovetags(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
- tags := strings.Split(param.String(), ",")
-
- // Strip only specific tags
- for _, tag := range tags {
-		re := regexp.MustCompile(fmt.Sprintf("</?%s/?>", tag))
- s = re.ReplaceAllString(s, "")
- }
-
- return AsValue(strings.TrimSpace(s)), nil
-}
-
-func filterRjust(in *Value, param *Value) (*Value, *Error) {
- return AsValue(fmt.Sprintf(fmt.Sprintf("%%%ds", param.Integer()), in.String())), nil
-}
-
-func filterSlice(in *Value, param *Value) (*Value, *Error) {
- comp := strings.Split(param.String(), ":")
- if len(comp) != 2 {
- return nil, &Error{
- Sender: "filter:slice",
- OrigError: errors.New("Slice string must have the format 'from:to' [from/to can be omitted, but the ':' is required]"),
- }
- }
-
- if !in.CanSlice() {
- return in, nil
- }
-
- from := AsValue(comp[0]).Integer()
- to := in.Len()
-
- if from > to {
- from = to
- }
-
- vto := AsValue(comp[1]).Integer()
- if vto >= from && vto <= in.Len() {
- to = vto
- }
-
- return in.Slice(from, to), nil
-}
-
-func filterTitle(in *Value, param *Value) (*Value, *Error) {
- if !in.IsString() {
- return AsValue(""), nil
- }
- return AsValue(strings.Title(strings.ToLower(in.String()))), nil
-}
-
-func filterWordcount(in *Value, param *Value) (*Value, *Error) {
- return AsValue(len(strings.Fields(in.String()))), nil
-}
-
-func filterWordwrap(in *Value, param *Value) (*Value, *Error) {
- words := strings.Fields(in.String())
- wordsLen := len(words)
- wrapAt := param.Integer()
- if wrapAt <= 0 {
- return in, nil
- }
-
-	// round up so that every word lands on exactly one line
-	linecount := (wordsLen + wrapAt - 1) / wrapAt
- lines := make([]string, 0, linecount)
- for i := 0; i < linecount; i++ {
- lines = append(lines, strings.Join(words[wrapAt*i:min(wrapAt*(i+1), wordsLen)], " "))
- }
- return AsValue(strings.Join(lines, "\n")), nil
-}
-
-func filterYesno(in *Value, param *Value) (*Value, *Error) {
- choices := map[int]string{
- 0: "yes",
- 1: "no",
- 2: "maybe",
- }
- paramString := param.String()
- customChoices := strings.Split(paramString, ",")
- if len(paramString) > 0 {
- if len(customChoices) > 3 {
- return nil, &Error{
- Sender: "filter:yesno",
- OrigError: errors.Errorf("You cannot pass more than 3 options to the 'yesno'-filter (got: '%s').", paramString),
- }
- }
- if len(customChoices) < 2 {
- return nil, &Error{
- Sender: "filter:yesno",
- OrigError: errors.Errorf("You must pass either no or at least 2 arguments to the 'yesno'-filter (got: '%s').", paramString),
- }
- }
-
- // Map to the options now
- choices[0] = customChoices[0]
- choices[1] = customChoices[1]
- if len(customChoices) == 3 {
- choices[2] = customChoices[2]
- }
- }
-
- // maybe
- if in.IsNil() {
- return AsValue(choices[2]), nil
- }
-
- // yes
- if in.IsTrue() {
- return AsValue(choices[0]), nil
- }
-
- // no
- return AsValue(choices[1]), nil
-}
diff --git a/vendor/github.com/flosch/pongo2/go.mod b/vendor/github.com/flosch/pongo2/go.mod
deleted file mode 100644
index 06b6c2566f43..000000000000
--- a/vendor/github.com/flosch/pongo2/go.mod
+++ /dev/null
@@ -1,13 +0,0 @@
-module github.com/flosch/pongo2
-
-require (
- github.com/go-check/check v0.0.0-20180628173108-788fd7840127
- github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5
- github.com/juju/loggo v0.0.0-20180524022052-584905176618 // indirect
- github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073 // indirect
- github.com/kr/pretty v0.1.0 // indirect
- github.com/mattn/goveralls v0.0.2 // indirect
- golang.org/x/tools v0.0.0-20181221001348-537d06c36207 // indirect
- gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce // indirect
- gopkg.in/yaml.v2 v2.2.2 // indirect
-)
diff --git a/vendor/github.com/flosch/pongo2/helpers.go b/vendor/github.com/flosch/pongo2/helpers.go
deleted file mode 100644
index 880dbc044435..000000000000
--- a/vendor/github.com/flosch/pongo2/helpers.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package pongo2
-
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
diff --git a/vendor/github.com/flosch/pongo2/lexer.go b/vendor/github.com/flosch/pongo2/lexer.go
deleted file mode 100644
index 67b0b950237c..000000000000
--- a/vendor/github.com/flosch/pongo2/lexer.go
+++ /dev/null
@@ -1,432 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "strings"
- "unicode/utf8"
-
- "github.com/juju/errors"
-)
-
-const (
- TokenError = iota
- EOF
-
- TokenHTML
-
- TokenKeyword
- TokenIdentifier
- TokenString
- TokenNumber
- TokenSymbol
-)
-
-var (
- tokenSpaceChars = " \n\r\t"
- tokenIdentifierChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
- tokenIdentifierCharsWithDigits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789"
- tokenDigits = "0123456789"
-
- // Available symbols in pongo2 (within filters/tag)
- TokenSymbols = []string{
- // 3-Char symbols
- "{{-", "-}}", "{%-", "-%}",
-
- // 2-Char symbols
- "==", ">=", "<=", "&&", "||", "{{", "}}", "{%", "%}", "!=", "<>",
-
- // 1-Char symbol
- "(", ")", "+", "-", "*", "<", ">", "/", "^", ",", ".", "!", "|", ":", "=", "%",
- }
-
- // Available keywords in pongo2
- TokenKeywords = []string{"in", "and", "or", "not", "true", "false", "as", "export"}
-)
-
-type TokenType int
-type Token struct {
- Filename string
- Typ TokenType
- Val string
- Line int
- Col int
- TrimWhitespaces bool
-}
-
-type lexerStateFn func() lexerStateFn
-type lexer struct {
- name string
- input string
- start int // start pos of the item
- pos int // current pos
- width int // width of last rune
- tokens []*Token
- errored bool
- startline int
- startcol int
- line int
- col int
-
- inVerbatim bool
- verbatimName string
-}
-
-func (t *Token) String() string {
- val := t.Val
- if len(val) > 1000 {
- val = fmt.Sprintf("%s...%s", val[:10], val[len(val)-5:len(val)])
- }
-
- typ := ""
- switch t.Typ {
- case TokenHTML:
- typ = "HTML"
- case TokenError:
- typ = "Error"
- case TokenIdentifier:
- typ = "Identifier"
- case TokenKeyword:
- typ = "Keyword"
- case TokenNumber:
- typ = "Number"
- case TokenString:
- typ = "String"
- case TokenSymbol:
- typ = "Symbol"
- default:
- typ = "Unknown"
- }
-
-	return fmt.Sprintf("<Token Typ=%s (%d) Val='%s' Line=%d Col=%d, WT=%t>",
-		typ, t.Typ, val, t.Line, t.Col, t.TrimWhitespaces)
-}
-
-func lex(name string, input string) ([]*Token, *Error) {
- l := &lexer{
- name: name,
- input: input,
- tokens: make([]*Token, 0, 100),
- line: 1,
- col: 1,
- startline: 1,
- startcol: 1,
- }
- l.run()
- if l.errored {
- errtoken := l.tokens[len(l.tokens)-1]
- return nil, &Error{
- Filename: name,
- Line: errtoken.Line,
- Column: errtoken.Col,
- Sender: "lexer",
- OrigError: errors.New(errtoken.Val),
- }
- }
- return l.tokens, nil
-}
-
-func (l *lexer) value() string {
- return l.input[l.start:l.pos]
-}
-
-func (l *lexer) length() int {
- return l.pos - l.start
-}
-
-func (l *lexer) emit(t TokenType) {
- tok := &Token{
- Filename: l.name,
- Typ: t,
- Val: l.value(),
- Line: l.startline,
- Col: l.startcol,
- }
-
- if t == TokenString {
- // Escape sequence \" in strings
- tok.Val = strings.Replace(tok.Val, `\"`, `"`, -1)
- tok.Val = strings.Replace(tok.Val, `\\`, `\`, -1)
- }
-
- if t == TokenSymbol && len(tok.Val) == 3 && (strings.HasSuffix(tok.Val, "-") || strings.HasPrefix(tok.Val, "-")) {
- tok.TrimWhitespaces = true
- tok.Val = strings.Replace(tok.Val, "-", "", -1)
- }
-
- l.tokens = append(l.tokens, tok)
- l.start = l.pos
- l.startline = l.line
- l.startcol = l.col
-}
-
-func (l *lexer) next() rune {
- if l.pos >= len(l.input) {
- l.width = 0
- return EOF
- }
- r, w := utf8.DecodeRuneInString(l.input[l.pos:])
- l.width = w
- l.pos += l.width
- l.col += l.width
- return r
-}
-
-func (l *lexer) backup() {
- l.pos -= l.width
- l.col -= l.width
-}
-
-func (l *lexer) peek() rune {
- r := l.next()
- l.backup()
- return r
-}
-
-func (l *lexer) ignore() {
- l.start = l.pos
- l.startline = l.line
- l.startcol = l.col
-}
-
-func (l *lexer) accept(what string) bool {
- if strings.IndexRune(what, l.next()) >= 0 {
- return true
- }
- l.backup()
- return false
-}
-
-func (l *lexer) acceptRun(what string) {
- for strings.IndexRune(what, l.next()) >= 0 {
- }
- l.backup()
-}
-
-func (l *lexer) errorf(format string, args ...interface{}) lexerStateFn {
- t := &Token{
- Filename: l.name,
- Typ: TokenError,
- Val: fmt.Sprintf(format, args...),
- Line: l.startline,
- Col: l.startcol,
- }
- l.tokens = append(l.tokens, t)
- l.errored = true
- l.startline = l.line
- l.startcol = l.col
- return nil
-}
-
-func (l *lexer) eof() bool {
- return l.start >= len(l.input)-1
-}
-
-func (l *lexer) run() {
- for {
- // TODO: Support verbatim tag names
- // https://docs.djangoproject.com/en/dev/ref/templates/builtins/#verbatim
- if l.inVerbatim {
- name := l.verbatimName
- if name != "" {
- name += " "
- }
- if strings.HasPrefix(l.input[l.pos:], fmt.Sprintf("{%% endverbatim %s%%}", name)) { // end verbatim
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
- w := len("{% endverbatim %}")
- l.pos += w
- l.col += w
- l.ignore()
- l.inVerbatim = false
- }
- } else if strings.HasPrefix(l.input[l.pos:], "{% verbatim %}") { // tag
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
- l.inVerbatim = true
- w := len("{% verbatim %}")
- l.pos += w
- l.col += w
- l.ignore()
- }
-
- if !l.inVerbatim {
- // Ignore single-line comments {# ... #}
- if strings.HasPrefix(l.input[l.pos:], "{#") {
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
-
- l.pos += 2 // pass '{#'
- l.col += 2
-
- for {
- switch l.peek() {
- case EOF:
- l.errorf("Single-line comment not closed.")
- return
- case '\n':
- l.errorf("Newline not permitted in a single-line comment.")
- return
- }
-
- if strings.HasPrefix(l.input[l.pos:], "#}") {
- l.pos += 2 // pass '#}'
- l.col += 2
- break
- }
-
- l.next()
- }
- l.ignore() // ignore whole comment
-
- // Comment skipped
- continue // next token
- }
-
- if strings.HasPrefix(l.input[l.pos:], "{{") || // variable
- strings.HasPrefix(l.input[l.pos:], "{%") { // tag
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
- l.tokenize()
- if l.errored {
- return
- }
- continue
- }
- }
-
- switch l.peek() {
- case '\n':
- l.line++
- l.col = 0
- }
- if l.next() == EOF {
- break
- }
- }
-
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
-
- if l.inVerbatim {
- l.errorf("verbatim-tag not closed, got EOF.")
- }
-}
-
-func (l *lexer) tokenize() {
- for state := l.stateCode; state != nil; {
- state = state()
- }
-}
-
-func (l *lexer) stateCode() lexerStateFn {
-outer_loop:
- for {
- switch {
- case l.accept(tokenSpaceChars):
- if l.value() == "\n" {
- return l.errorf("Newline not allowed within tag/variable.")
- }
- l.ignore()
- continue
- case l.accept(tokenIdentifierChars):
- return l.stateIdentifier
- case l.accept(tokenDigits):
- return l.stateNumber
- case l.accept(`"'`):
- return l.stateString
- }
-
- // Check for symbol
- for _, sym := range TokenSymbols {
- if strings.HasPrefix(l.input[l.start:], sym) {
- l.pos += len(sym)
- l.col += l.length()
- l.emit(TokenSymbol)
-
- if sym == "%}" || sym == "-%}" || sym == "}}" || sym == "-}}" {
- // Tag/variable end, return after emit
- return nil
- }
-
- continue outer_loop
- }
- }
-
- break
- }
-
- // Normal shut down
- return nil
-}
-
-func (l *lexer) stateIdentifier() lexerStateFn {
- l.acceptRun(tokenIdentifierChars)
- l.acceptRun(tokenIdentifierCharsWithDigits)
- for _, kw := range TokenKeywords {
- if kw == l.value() {
- l.emit(TokenKeyword)
- return l.stateCode
- }
- }
- l.emit(TokenIdentifier)
- return l.stateCode
-}
-
-func (l *lexer) stateNumber() lexerStateFn {
- l.acceptRun(tokenDigits)
- if l.accept(tokenIdentifierCharsWithDigits) {
- // This seems to be an identifier starting with a number.
- // See https://github.com/flosch/pongo2/issues/151
- return l.stateIdentifier()
- }
- /*
- Maybe context-sensitive number lexing?
- * comments.0.Text // first comment
- * usercomments.1.0 // second user, first comment
- * if (score >= 8.5) // 8.5 as a number
-
- if l.peek() == '.' {
- l.accept(".")
- if !l.accept(tokenDigits) {
- return l.errorf("Malformed number.")
- }
- l.acceptRun(tokenDigits)
- }
- */
- l.emit(TokenNumber)
- return l.stateCode
-}
-
-func (l *lexer) stateString() lexerStateFn {
- quotationMark := l.value()
- l.ignore()
- l.startcol-- // we're starting the position at the first "
- for !l.accept(quotationMark) {
- switch l.next() {
- case '\\':
- // escape sequence
- switch l.peek() {
- case '"', '\\':
- l.next()
- default:
- return l.errorf("Unknown escape sequence: \\%c", l.peek())
- }
- case EOF:
- return l.errorf("Unexpected EOF, string not closed.")
- case '\n':
- return l.errorf("Newline in string is not allowed.")
- }
- }
- l.backup()
- l.emit(TokenString)
-
- l.next()
- l.ignore()
-
- return l.stateCode
-}
diff --git a/vendor/github.com/flosch/pongo2/nodes.go b/vendor/github.com/flosch/pongo2/nodes.go
deleted file mode 100644
index 5b039cdf40ca..000000000000
--- a/vendor/github.com/flosch/pongo2/nodes.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package pongo2
-
-// The root document
-type nodeDocument struct {
- Nodes []INode
-}
-
-func (doc *nodeDocument) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for _, n := range doc.Nodes {
- err := n.Execute(ctx, writer)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/flosch/pongo2/nodes_html.go b/vendor/github.com/flosch/pongo2/nodes_html.go
deleted file mode 100644
index c735defeb240..000000000000
--- a/vendor/github.com/flosch/pongo2/nodes_html.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package pongo2
-
-import (
- "strings"
-)
-
-type nodeHTML struct {
- token *Token
- trimLeft bool
- trimRight bool
-}
-
-func (n *nodeHTML) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- res := n.token.Val
- if n.trimLeft {
- res = strings.TrimLeft(res, tokenSpaceChars)
- }
- if n.trimRight {
- res = strings.TrimRight(res, tokenSpaceChars)
- }
- writer.WriteString(res)
- return nil
-}
diff --git a/vendor/github.com/flosch/pongo2/nodes_wrapper.go b/vendor/github.com/flosch/pongo2/nodes_wrapper.go
deleted file mode 100644
index d1bcb8d851ff..000000000000
--- a/vendor/github.com/flosch/pongo2/nodes_wrapper.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package pongo2
-
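-// A NodeWrapper wraps the nodes found between a tag and its end-tag
-// (see the parser's WrapUntilTag); Endtag holds the name of the
-// end-tag that terminated the wrapping.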
-type NodeWrapper struct {
- Endtag string
- nodes []INode
-}
-
-func (wrapper *NodeWrapper) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for _, n := range wrapper.nodes {
- err := n.Execute(ctx, writer)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/flosch/pongo2/options.go b/vendor/github.com/flosch/pongo2/options.go
deleted file mode 100644
index 9c39e467ef66..000000000000
--- a/vendor/github.com/flosch/pongo2/options.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package pongo2
-
-// Options allow you to change the behaviour of the template engine.
-// You can change the options before calling the Execute method.
-type Options struct {
-	// If this is set to true, the first newline after a block is removed (block, not variable tag!). Defaults to false.
-	TrimBlocks bool
-
-	// If this is set to true, leading spaces and tabs are stripped from the start of a line to a block. Defaults to false.
-	LStripBlocks bool
-}
-
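-// For example, a sketch (this assumes a template set that exposes these
-// options, e.g. pongo2's DefaultSet):
-//
-//	pongo2.DefaultSet.Options.Update(&pongo2.Options{TrimBlocks: true})
-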
-func newOptions() *Options {
- return &Options{
- TrimBlocks: false,
- LStripBlocks: false,
- }
-}
-
-// Update updates this options from another options.
-func (opt *Options) Update(other *Options) *Options {
- opt.TrimBlocks = other.TrimBlocks
- opt.LStripBlocks = other.LStripBlocks
-
- return opt
-}
diff --git a/vendor/github.com/flosch/pongo2/parser.go b/vendor/github.com/flosch/pongo2/parser.go
deleted file mode 100644
index 2279e3c4962a..000000000000
--- a/vendor/github.com/flosch/pongo2/parser.go
+++ /dev/null
@@ -1,309 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "strings"
-
- "github.com/juju/errors"
-)
-
-type INode interface {
- Execute(*ExecutionContext, TemplateWriter) *Error
-}
-
-type IEvaluator interface {
- INode
- GetPositionToken() *Token
- Evaluate(*ExecutionContext) (*Value, *Error)
- FilterApplied(name string) bool
-}
-
-// The parser provides you a comprehensive and easy tool to
-// work with the template document and arguments provided by
-// the user for your custom tag.
-//
-// The parser works on a token list which will be provided by pongo2.
-// A token is a unit you can work with. Tokens are either of type identifier,
-// string, number, keyword, HTML or symbol.
-//
-// (See Token's documentation for more about tokens)
-type Parser struct {
- name string
- idx int
- tokens []*Token
- lastToken *Token
-
- // if the parser parses a template document, here will be
- // a reference to it (needed to access the template through Tags)
- template *Template
-}
-
-// Creates a new parser to parse tokens.
-// Used inside pongo2 to parse documents and to provide an easy-to-use
-// parser for tag authors
-func newParser(name string, tokens []*Token, template *Template) *Parser {
- p := &Parser{
- name: name,
- tokens: tokens,
- template: template,
- }
- if len(tokens) > 0 {
- p.lastToken = tokens[len(tokens)-1]
- }
- return p
-}
-
-// Consume one token. It will be gone forever.
-func (p *Parser) Consume() {
- p.ConsumeN(1)
-}
-
-// Consume N tokens. They will be gone forever.
-func (p *Parser) ConsumeN(count int) {
- p.idx += count
-}
-
-// Returns the current token.
-func (p *Parser) Current() *Token {
- return p.Get(p.idx)
-}
-
-// Returns the CURRENT token if the given type matches.
-// Consumes this token on success.
-func (p *Parser) MatchType(typ TokenType) *Token {
- if t := p.PeekType(typ); t != nil {
- p.Consume()
- return t
- }
- return nil
-}
-
-// Returns the CURRENT token if the given type AND value matches.
-// Consumes this token on success.
-func (p *Parser) Match(typ TokenType, val string) *Token {
- if t := p.Peek(typ, val); t != nil {
- p.Consume()
- return t
- }
- return nil
-}
-
-// Returns the CURRENT token if the given type AND *one* of
-// the given values matches.
-// Consumes this token on success.
-func (p *Parser) MatchOne(typ TokenType, vals ...string) *Token {
- for _, val := range vals {
- if t := p.Peek(typ, val); t != nil {
- p.Consume()
- return t
- }
- }
- return nil
-}
-
-// Returns the CURRENT token if the given type matches.
-// It DOES NOT consume the token.
-func (p *Parser) PeekType(typ TokenType) *Token {
- return p.PeekTypeN(0, typ)
-}
-
-// Returns the CURRENT token if the given type AND value matches.
-// It DOES NOT consume the token.
-func (p *Parser) Peek(typ TokenType, val string) *Token {
- return p.PeekN(0, typ, val)
-}
-
-// Returns the CURRENT token if the given type AND *one* of
-// the given values matches.
-// It DOES NOT consume the token.
-func (p *Parser) PeekOne(typ TokenType, vals ...string) *Token {
- for _, v := range vals {
- t := p.PeekN(0, typ, v)
- if t != nil {
- return t
- }
- }
- return nil
-}
-
-// Returns the tokens[current position + shift] token if the
-// given type AND value matches for that token.
-// DOES NOT consume the token.
-func (p *Parser) PeekN(shift int, typ TokenType, val string) *Token {
- t := p.Get(p.idx + shift)
- if t != nil {
- if t.Typ == typ && t.Val == val {
- return t
- }
- }
- return nil
-}
-
-// Returns the tokens[current position + shift] token if the given type matches.
-// DOES NOT consume the token for that token.
-func (p *Parser) PeekTypeN(shift int, typ TokenType) *Token {
- t := p.Get(p.idx + shift)
- if t != nil {
- if t.Typ == typ {
- return t
- }
- }
- return nil
-}
-
-// Returns the UNCONSUMED token count.
-func (p *Parser) Remaining() int {
- return len(p.tokens) - p.idx
-}
-
-// Returns the total token count.
-func (p *Parser) Count() int {
- return len(p.tokens)
-}
-
-// Returns tokens[i] or NIL (if i >= len(tokens))
-func (p *Parser) Get(i int) *Token {
- if i < len(p.tokens) && i >= 0 {
- return p.tokens[i]
- }
- return nil
-}
-
-// Returns tokens[current-position + shift] or NIL
-// (if (current-position + i) >= len(tokens))
-func (p *Parser) GetR(shift int) *Token {
- i := p.idx + shift
- return p.Get(i)
-}
-
-// Error produces a nice error message and returns an error-object.
-// The 'token'-argument is optional. If provided, it will take
-// the token's position information. If not provided, it will
-// automatically use the CURRENT token's position information.
-func (p *Parser) Error(msg string, token *Token) *Error {
- if token == nil {
- // Set current token
- token = p.Current()
- if token == nil {
- // Set to last token
- if len(p.tokens) > 0 {
- token = p.tokens[len(p.tokens)-1]
- }
- }
- }
- var line, col int
- if token != nil {
- line = token.Line
- col = token.Col
- }
- return &Error{
- Template: p.template,
- Filename: p.name,
- Sender: "parser",
- Line: line,
- Column: col,
- Token: token,
- OrigError: errors.New(msg),
- }
-}
-
-// Wraps all nodes between starting tag and "{% endtag %}" and provides
-// one simple interface to execute the wrapped nodes.
-// It returns a parser to process provided arguments to the tag.
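-//
-// Typical use from a custom tag's parsing function (a sketch; the end-tag
-// name "endmytag" is illustrative):
-//
-//	wrapper, argsParser, err := doc.WrapUntilTag("endmytag")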
-func (p *Parser) WrapUntilTag(names ...string) (*NodeWrapper, *Parser, *Error) {
- wrapper := &NodeWrapper{}
-
- var tagArgs []*Token
-
- for p.Remaining() > 0 {
- // New tag, check whether we have to stop wrapping here
- if p.Peek(TokenSymbol, "{%") != nil {
- tagIdent := p.PeekTypeN(1, TokenIdentifier)
-
- if tagIdent != nil {
- // We've found a (!) end-tag
-
- found := false
- for _, n := range names {
- if tagIdent.Val == n {
- found = true
- break
- }
- }
-
- // We only process the tag if we've found an end tag
- if found {
- // Okay, endtag found.
- p.ConsumeN(2) // '{%' tagname
-
- for {
- if p.Match(TokenSymbol, "%}") != nil {
- // Okay, end the wrapping here
- wrapper.Endtag = tagIdent.Val
- return wrapper, newParser(p.template.name, tagArgs, p.template), nil
- }
- t := p.Current()
- p.Consume()
- if t == nil {
- return nil, nil, p.Error("Unexpected EOF.", p.lastToken)
- }
- tagArgs = append(tagArgs, t)
- }
- }
- }
-
- }
-
- // Otherwise process next element to be wrapped
- node, err := p.parseDocElement()
- if err != nil {
- return nil, nil, err
- }
- wrapper.nodes = append(wrapper.nodes, node)
- }
-
- return nil, nil, p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")),
- p.lastToken)
-}
-
-// Skips all nodes between starting tag and "{% endtag %}"
-func (p *Parser) SkipUntilTag(names ...string) *Error {
- for p.Remaining() > 0 {
- // New tag, check whether we have to stop wrapping here
- if p.Peek(TokenSymbol, "{%") != nil {
- tagIdent := p.PeekTypeN(1, TokenIdentifier)
-
- if tagIdent != nil {
- // We've found a (!) end-tag
-
- found := false
- for _, n := range names {
- if tagIdent.Val == n {
- found = true
- break
- }
- }
-
- // We only process the tag if we've found an end tag
- if found {
- // Okay, endtag found.
- p.ConsumeN(2) // '{%' tagname
-
-				for {
-					if p.Match(TokenSymbol, "%}") != nil {
-						// Done skipping, exit.
-						return nil
-					}
-					// Consume the end-tag's arguments (if any); guard against
-					// EOF so an unterminated tag cannot loop forever.
-					if p.Current() == nil {
-						return p.Error("Unexpected EOF.", p.lastToken)
-					}
-					p.Consume()
-				}
- }
- }
- }
- t := p.Current()
- p.Consume()
- if t == nil {
- return p.Error("Unexpected EOF.", p.lastToken)
- }
- }
-
- return p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")), p.lastToken)
-}
diff --git a/vendor/github.com/flosch/pongo2/parser_document.go b/vendor/github.com/flosch/pongo2/parser_document.go
deleted file mode 100644
index e3ac2c8e9d89..000000000000
--- a/vendor/github.com/flosch/pongo2/parser_document.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package pongo2
-
-// Doc = { ( Filter | Tag | HTML ) }
-func (p *Parser) parseDocElement() (INode, *Error) {
- t := p.Current()
-
- switch t.Typ {
- case TokenHTML:
- n := &nodeHTML{token: t}
- left := p.PeekTypeN(-1, TokenSymbol)
- right := p.PeekTypeN(1, TokenSymbol)
- n.trimLeft = left != nil && left.TrimWhitespaces
- n.trimRight = right != nil && right.TrimWhitespaces
- p.Consume() // consume HTML element
- return n, nil
- case TokenSymbol:
- switch t.Val {
- case "{{":
- // parse variable
- variable, err := p.parseVariableElement()
- if err != nil {
- return nil, err
- }
- return variable, nil
- case "{%":
- // parse tag
- tag, err := p.parseTagElement()
- if err != nil {
- return nil, err
- }
- return tag, nil
- }
- }
- return nil, p.Error("Unexpected token (only HTML/tags/filters in templates allowed)", t)
-}
-
-func (tpl *Template) parse() *Error {
- tpl.parser = newParser(tpl.name, tpl.tokens, tpl)
- doc, err := tpl.parser.parseDocument()
- if err != nil {
- return err
- }
- tpl.root = doc
- return nil
-}
-
-func (p *Parser) parseDocument() (*nodeDocument, *Error) {
- doc := &nodeDocument{}
-
- for p.Remaining() > 0 {
- node, err := p.parseDocElement()
- if err != nil {
- return nil, err
- }
- doc.Nodes = append(doc.Nodes, node)
- }
-
- return doc, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/parser_expression.go b/vendor/github.com/flosch/pongo2/parser_expression.go
deleted file mode 100644
index 1663ec461232..000000000000
--- a/vendor/github.com/flosch/pongo2/parser_expression.go
+++ /dev/null
@@ -1,503 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "math"
-)
-
-type Expression struct {
- // TODO: Add location token?
- expr1 IEvaluator
- expr2 IEvaluator
- opToken *Token
-}
-
-type relationalExpression struct {
- // TODO: Add location token?
- expr1 IEvaluator
- expr2 IEvaluator
- opToken *Token
-}
-
-type simpleExpression struct {
- negate bool
- negativeSign bool
- term1 IEvaluator
- term2 IEvaluator
- opToken *Token
-}
-
-type term struct {
- // TODO: Add location token?
- factor1 IEvaluator
- factor2 IEvaluator
- opToken *Token
-}
-
-type power struct {
- // TODO: Add location token?
- power1 IEvaluator
- power2 IEvaluator
-}
-
-func (expr *Expression) FilterApplied(name string) bool {
- return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
- (expr.expr2 != nil && expr.expr2.FilterApplied(name)))
-}
-
-func (expr *relationalExpression) FilterApplied(name string) bool {
- return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
- (expr.expr2 != nil && expr.expr2.FilterApplied(name)))
-}
-
-func (expr *simpleExpression) FilterApplied(name string) bool {
- return expr.term1.FilterApplied(name) && (expr.term2 == nil ||
- (expr.term2 != nil && expr.term2.FilterApplied(name)))
-}
-
-func (expr *term) FilterApplied(name string) bool {
- return expr.factor1.FilterApplied(name) && (expr.factor2 == nil ||
- (expr.factor2 != nil && expr.factor2.FilterApplied(name)))
-}
-
-func (expr *power) FilterApplied(name string) bool {
- return expr.power1.FilterApplied(name) && (expr.power2 == nil ||
- (expr.power2 != nil && expr.power2.FilterApplied(name)))
-}
-
-func (expr *Expression) GetPositionToken() *Token {
- return expr.expr1.GetPositionToken()
-}
-
-func (expr *relationalExpression) GetPositionToken() *Token {
- return expr.expr1.GetPositionToken()
-}
-
-func (expr *simpleExpression) GetPositionToken() *Token {
- return expr.term1.GetPositionToken()
-}
-
-func (expr *term) GetPositionToken() *Token {
- return expr.factor1.GetPositionToken()
-}
-
-func (expr *power) GetPositionToken() *Token {
- return expr.power1.GetPositionToken()
-}
-
-func (expr *Expression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *relationalExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *simpleExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *term) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *power) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *Expression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- v1, err := expr.expr1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.expr2 != nil {
- switch expr.opToken.Val {
- case "and", "&&":
- if !v1.IsTrue() {
- return AsValue(false), nil
- } else {
- v2, err := expr.expr2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- return AsValue(v2.IsTrue()), nil
- }
- case "or", "||":
- if v1.IsTrue() {
- return AsValue(true), nil
- } else {
- v2, err := expr.expr2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- return AsValue(v2.IsTrue()), nil
- }
- default:
- return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
- }
- } else {
- return v1, nil
- }
-}
-
-func (expr *relationalExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- v1, err := expr.expr1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.expr2 != nil {
- v2, err := expr.expr2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- switch expr.opToken.Val {
- case "<=":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() <= v2.Float()), nil
- }
- return AsValue(v1.Integer() <= v2.Integer()), nil
- case ">=":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() >= v2.Float()), nil
- }
- return AsValue(v1.Integer() >= v2.Integer()), nil
- case "==":
- return AsValue(v1.EqualValueTo(v2)), nil
- case ">":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() > v2.Float()), nil
- }
- return AsValue(v1.Integer() > v2.Integer()), nil
- case "<":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() < v2.Float()), nil
- }
- return AsValue(v1.Integer() < v2.Integer()), nil
- case "!=", "<>":
- return AsValue(!v1.EqualValueTo(v2)), nil
- case "in":
- return AsValue(v2.Contains(v1)), nil
- default:
- return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
- }
- } else {
- return v1, nil
- }
-}
-
-func (expr *simpleExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- t1, err := expr.term1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- result := t1
-
- if expr.negate {
- result = result.Negate()
- }
-
- if expr.negativeSign {
- if result.IsNumber() {
- switch {
- case result.IsFloat():
- result = AsValue(-1 * result.Float())
- case result.IsInteger():
- result = AsValue(-1 * result.Integer())
- default:
- return nil, ctx.Error("Operation between a number and a non-(float/integer) is not possible", nil)
- }
- } else {
- return nil, ctx.Error("Negative sign on a non-number expression", expr.GetPositionToken())
- }
- }
-
- if expr.term2 != nil {
- t2, err := expr.term2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- switch expr.opToken.Val {
- case "+":
- if result.IsFloat() || t2.IsFloat() {
- // Result will be a float
- return AsValue(result.Float() + t2.Float()), nil
- }
- // Result will be an integer
- return AsValue(result.Integer() + t2.Integer()), nil
- case "-":
- if result.IsFloat() || t2.IsFloat() {
- // Result will be a float
- return AsValue(result.Float() - t2.Float()), nil
- }
- // Result will be an integer
- return AsValue(result.Integer() - t2.Integer()), nil
- default:
- return nil, ctx.Error("Unimplemented", expr.GetPositionToken())
- }
- }
-
- return result, nil
-}
-
-func (expr *term) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- f1, err := expr.factor1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.factor2 != nil {
- f2, err := expr.factor2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- switch expr.opToken.Val {
- case "*":
- if f1.IsFloat() || f2.IsFloat() {
- // Result will be float
- return AsValue(f1.Float() * f2.Float()), nil
- }
- // Result will be int
- return AsValue(f1.Integer() * f2.Integer()), nil
- case "/":
- if f1.IsFloat() || f2.IsFloat() {
- // Result will be float
- return AsValue(f1.Float() / f2.Float()), nil
- }
- // Result will be int
- return AsValue(f1.Integer() / f2.Integer()), nil
- case "%":
- // Result will be int
- return AsValue(f1.Integer() % f2.Integer()), nil
- default:
- return nil, ctx.Error("unimplemented", expr.opToken)
- }
- } else {
- return f1, nil
- }
-}
-
-func (expr *power) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- p1, err := expr.power1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.power2 != nil {
- p2, err := expr.power2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- return AsValue(math.Pow(p1.Float(), p2.Float())), nil
- }
- return p1, nil
-}
-
-func (p *Parser) parseFactor() (IEvaluator, *Error) {
- if p.Match(TokenSymbol, "(") != nil {
- expr, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- if p.Match(TokenSymbol, ")") == nil {
- return nil, p.Error("Closing bracket expected after expression", nil)
- }
- return expr, nil
- }
-
- return p.parseVariableOrLiteralWithFilter()
-}
-
-func (p *Parser) parsePower() (IEvaluator, *Error) {
- pw := new(power)
-
- power1, err := p.parseFactor()
- if err != nil {
- return nil, err
- }
- pw.power1 = power1
-
- if p.Match(TokenSymbol, "^") != nil {
- power2, err := p.parsePower()
- if err != nil {
- return nil, err
- }
- pw.power2 = power2
- }
-
- if pw.power2 == nil {
- // Shortcut for faster evaluation
- return pw.power1, nil
- }
-
- return pw, nil
-}
-
-func (p *Parser) parseTerm() (IEvaluator, *Error) {
- returnTerm := new(term)
-
- factor1, err := p.parsePower()
- if err != nil {
- return nil, err
- }
- returnTerm.factor1 = factor1
-
- for p.PeekOne(TokenSymbol, "*", "/", "%") != nil {
- if returnTerm.opToken != nil {
- // Create new sub-term
- returnTerm = &term{
- factor1: returnTerm,
- }
- }
-
- op := p.Current()
- p.Consume()
-
- factor2, err := p.parsePower()
- if err != nil {
- return nil, err
- }
-
- returnTerm.opToken = op
- returnTerm.factor2 = factor2
- }
-
- if returnTerm.opToken == nil {
- // Shortcut for faster evaluation
- return returnTerm.factor1, nil
- }
-
- return returnTerm, nil
-}
-
-func (p *Parser) parseSimpleExpression() (IEvaluator, *Error) {
- expr := new(simpleExpression)
-
- if sign := p.MatchOne(TokenSymbol, "+", "-"); sign != nil {
- if sign.Val == "-" {
- expr.negativeSign = true
- }
- }
-
- if p.Match(TokenSymbol, "!") != nil || p.Match(TokenKeyword, "not") != nil {
- expr.negate = true
- }
-
- term1, err := p.parseTerm()
- if err != nil {
- return nil, err
- }
- expr.term1 = term1
-
- for p.PeekOne(TokenSymbol, "+", "-") != nil {
- if expr.opToken != nil {
- // New sub expr
- expr = &simpleExpression{
- term1: expr,
- }
- }
-
- op := p.Current()
- p.Consume()
-
- term2, err := p.parseTerm()
- if err != nil {
- return nil, err
- }
-
- expr.term2 = term2
- expr.opToken = op
- }
-
- if !expr.negate && !expr.negativeSign && expr.term2 == nil {
- // Shortcut for faster evaluation
- return expr.term1, nil
- }
-
- return expr, nil
-}
-
-func (p *Parser) parseRelationalExpression() (IEvaluator, *Error) {
- expr1, err := p.parseSimpleExpression()
- if err != nil {
- return nil, err
- }
-
- expr := &relationalExpression{
- expr1: expr1,
- }
-
- if t := p.MatchOne(TokenSymbol, "==", "<=", ">=", "!=", "<>", ">", "<"); t != nil {
- expr2, err := p.parseRelationalExpression()
- if err != nil {
- return nil, err
- }
- expr.opToken = t
- expr.expr2 = expr2
- } else if t := p.MatchOne(TokenKeyword, "in"); t != nil {
- expr2, err := p.parseSimpleExpression()
- if err != nil {
- return nil, err
- }
- expr.opToken = t
- expr.expr2 = expr2
- }
-
- if expr.expr2 == nil {
- // Shortcut for faster evaluation
- return expr.expr1, nil
- }
-
- return expr, nil
-}
-
-func (p *Parser) ParseExpression() (IEvaluator, *Error) {
- rexpr1, err := p.parseRelationalExpression()
- if err != nil {
- return nil, err
- }
-
- exp := &Expression{
- expr1: rexpr1,
- }
-
- if p.PeekOne(TokenSymbol, "&&", "||") != nil || p.PeekOne(TokenKeyword, "and", "or") != nil {
- op := p.Current()
- p.Consume()
- expr2, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- exp.expr2 = expr2
- exp.opToken = op
- }
-
- if exp.expr2 == nil {
- // Shortcut for faster evaluation
- return exp.expr1, nil
- }
-
- return exp, nil
-}
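The expression parser removed above is a standard recursive-descent chain: ParseExpression handles and/or, then parseRelationalExpression (==, <, >, in, ...), parseSimpleExpression (+, -, not), parseTerm (*, /, %), parsePower (^), and finally parseFactor (parenthesized sub-expressions and literals), which is what gives the operators their precedence. A minimal sketch of how that precedence surfaces in a template, assuming the upstream github.com/flosch/pongo2 package in place of this deleted vendored copy:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// parseTerm binds "*" tighter than parseSimpleExpression binds "+",
	// so this evaluates as 2 + (3 * 4), entirely in integer arithmetic.
	tpl, err := pongo2.FromString("{{ 2 + 3 * 4 }}")
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(pongo2.Context{})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 14
}
```

Note that "^" is routed through parsePower and math.Pow, so exponentiation yields a float even for integer operands.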
diff --git a/vendor/github.com/flosch/pongo2/pongo2.go b/vendor/github.com/flosch/pongo2/pongo2.go
deleted file mode 100644
index eda3aa07cbe2..000000000000
--- a/vendor/github.com/flosch/pongo2/pongo2.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package pongo2
-
-// Version string
-const Version = "dev"
-
-// Must panics if a Template couldn't be parsed successfully. This is how you
-// would use it:
-// var baseTemplate = pongo2.Must(pongo2.FromFile("templates/base.html"))
-func Must(tpl *Template, err error) *Template {
- if err != nil {
- panic(err)
- }
- return tpl
-}
diff --git a/vendor/github.com/flosch/pongo2/tags.go b/vendor/github.com/flosch/pongo2/tags.go
deleted file mode 100644
index 3668b06a220a..000000000000
--- a/vendor/github.com/flosch/pongo2/tags.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package pongo2
-
-/* Incomplete:
- -----------
-
- verbatim (only the "name" argument is missing for verbatim)
-
- Reconsideration:
- ----------------
-
- debug (reason: not sure what to output yet)
- regroup / Grouping on other properties (reason: maybe too python-specific; not sure how useful this would be in Go)
-
- Following built-in tags won't be added:
- --------------------------------------
-
- csrf_token (reason: web-framework specific)
- load (reason: python-specific)
- url (reason: web-framework specific)
-*/
-
-import (
- "fmt"
-
- "github.com/juju/errors"
-)
-
-type INodeTag interface {
- INode
-}
-
-// This is the function signature of the tag's parser you will have
-// to implement in order to create a new tag.
-//
-// 'doc' is providing access to the whole document while 'arguments'
-// is providing access to the user's arguments to the tag:
-//
-// {% your_tag_name some "arguments" 123 %}
-//
-// start_token will be the *Token with the tag's name in it (here: your_tag_name).
-//
-// Please see the Parser documentation on how to use the parser.
-// See RegisterTag()'s documentation for more information about
-// writing a tag as well.
-type TagParser func(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error)
-
-type tag struct {
- name string
- parser TagParser
-}
-
-var tags map[string]*tag
-
-func init() {
- tags = make(map[string]*tag)
-}
-
-// Registers a new tag. You usually want to call this
-// function in the tag's init() function:
-// http://golang.org/doc/effective_go.html#init
-//
-// See http://www.florian-schlachter.de/post/pongo2/ for more about
-// writing filters and tags.
-func RegisterTag(name string, parserFn TagParser) error {
- _, existing := tags[name]
- if existing {
- return errors.Errorf("tag with name '%s' is already registered", name)
- }
- tags[name] = &tag{
- name: name,
- parser: parserFn,
- }
- return nil
-}
-
-// Replaces an already registered tag with a new implementation. Use this
-// function with caution since it allows you to change existing tag behaviour.
-func ReplaceTag(name string, parserFn TagParser) error {
- _, existing := tags[name]
- if !existing {
- return errors.Errorf("tag with name '%s' does not exist (therefore cannot be overridden)", name)
- }
- tags[name] = &tag{
- name: name,
- parser: parserFn,
- }
- return nil
-}
-
-// Tag = "{%" IDENT ARGS "%}"
-func (p *Parser) parseTagElement() (INodeTag, *Error) {
- p.Consume() // consume "{%"
- tokenName := p.MatchType(TokenIdentifier)
-
- // Check for identifier
- if tokenName == nil {
- return nil, p.Error("Tag name must be an identifier.", nil)
- }
-
- // Check for the existing tag
- tag, exists := tags[tokenName.Val]
- if !exists {
- // Does not exist
- return nil, p.Error(fmt.Sprintf("Tag '%s' not found (or beginning tag not provided)", tokenName.Val), tokenName)
- }
-
- // Check sandbox tag restriction
- if _, isBanned := p.template.set.bannedTags[tokenName.Val]; isBanned {
- return nil, p.Error(fmt.Sprintf("Usage of tag '%s' is not allowed (sandbox restriction active).", tokenName.Val), tokenName)
- }
-
- var argsToken []*Token
- for p.Peek(TokenSymbol, "%}") == nil && p.Remaining() > 0 {
- // Add token to args
- argsToken = append(argsToken, p.Current())
- p.Consume() // next token
- }
-
- // EOF?
- if p.Remaining() == 0 {
- return nil, p.Error("Unexpectedly reached EOF, no tag end found.", p.lastToken)
- }
-
- p.Match(TokenSymbol, "%}")
-
- argParser := newParser(p.name, argsToken, p.template)
- if len(argsToken) == 0 {
- // This is done to have nice EOF error messages
- argParser.lastToken = tokenName
- }
-
- p.template.level++
- defer func() { p.template.level-- }()
- return tag.parser(p, tokenName, argParser)
-}
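RegisterTag plus the TagParser signature above is pongo2's whole tag-extension surface: the parser function consumes the tag's arguments, optionally wraps a body with WrapUntilTag, and returns a node whose Execute renders it. A sketch of a custom body-wrapping tag, assuming the upstream github.com/flosch/pongo2 package; the tag name "shout" and its behavior are invented for illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/flosch/pongo2"
)

// shoutNode upper-cases whatever its wrapped body renders to.
type shoutNode struct {
	wrapper *pongo2.NodeWrapper
}

func (n *shoutNode) Execute(ctx *pongo2.ExecutionContext, writer pongo2.TemplateWriter) *pongo2.Error {
	var buf bytes.Buffer // *bytes.Buffer satisfies TemplateWriter
	if err := n.wrapper.Execute(ctx, &buf); err != nil {
		return err
	}
	writer.WriteString(strings.ToUpper(buf.String()))
	return nil
}

func shoutParser(doc *pongo2.Parser, start *pongo2.Token, arguments *pongo2.Parser) (pongo2.INodeTag, *pongo2.Error) {
	node := &shoutNode{}
	wrapper, _, err := doc.WrapUntilTag("endshout") // wrap body up to {% endshout %}
	if err != nil {
		return nil, err
	}
	node.wrapper = wrapper
	if arguments.Remaining() > 0 {
		return nil, arguments.Error("shout-tag takes no arguments.", nil)
	}
	return node, nil
}

func main() {
	if err := pongo2.RegisterTag("shout", shoutParser); err != nil {
		panic(err)
	}
	tpl := pongo2.Must(pongo2.FromString("{% shout %}hello {{ name }}{% endshout %}"))
	out, _ := tpl.Execute(pongo2.Context{"name": "world"})
	fmt.Println(out) // HELLO WORLD
}
```

WrapUntilTag also hands back a second *Parser holding any tokens between the end-tag's name and "%}", which this sketch ignores.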
diff --git a/vendor/github.com/flosch/pongo2/tags_autoescape.go b/vendor/github.com/flosch/pongo2/tags_autoescape.go
deleted file mode 100644
index 590a1db3506e..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_autoescape.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package pongo2
-
-type tagAutoescapeNode struct {
- wrapper *NodeWrapper
- autoescape bool
-}
-
-func (node *tagAutoescapeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- old := ctx.Autoescape
- ctx.Autoescape = node.autoescape
-
- err := node.wrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
-
- ctx.Autoescape = old
-
- return nil
-}
-
-func tagAutoescapeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- autoescapeNode := &tagAutoescapeNode{}
-
- wrapper, _, err := doc.WrapUntilTag("endautoescape")
- if err != nil {
- return nil, err
- }
- autoescapeNode.wrapper = wrapper
-
- modeToken := arguments.MatchType(TokenIdentifier)
- if modeToken == nil {
- return nil, arguments.Error("A mode is required for autoescape-tag.", nil)
- }
- if modeToken.Val == "on" {
- autoescapeNode.autoescape = true
- } else if modeToken.Val == "off" {
- autoescapeNode.autoescape = false
- } else {
- return nil, arguments.Error("Only 'on' or 'off' is valid as an autoescape-mode.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed autoescape-tag arguments.", nil)
- }
-
- return autoescapeNode, nil
-}
-
-func init() {
- RegisterTag("autoescape", tagAutoescapeParser)
-}
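tagAutoescapeNode saves and restores ctx.Autoescape around its body, so the setting is scoped to the block; pongo2 escapes variable output by default. A small sketch, again assuming the upstream package:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// Inside the block the value passes through raw; outside it is escaped.
	tpl := pongo2.Must(pongo2.FromString(
		"{% autoescape off %}{{ body }}{% endautoescape %} vs {{ body }}"))
	out, _ := tpl.Execute(pongo2.Context{"body": "<b>hi</b>"})
	fmt.Println(out) // <b>hi</b> vs &lt;b&gt;hi&lt;/b&gt;
}
```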
diff --git a/vendor/github.com/flosch/pongo2/tags_block.go b/vendor/github.com/flosch/pongo2/tags_block.go
deleted file mode 100644
index 86145f329aef..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_block.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "fmt"
-)
-
-type tagBlockNode struct {
- name string
-}
-
-func (node *tagBlockNode) getBlockWrappers(tpl *Template) []*NodeWrapper {
- nodeWrappers := make([]*NodeWrapper, 0)
- var t *NodeWrapper
-
- for tpl != nil {
- t = tpl.blocks[node.name]
- if t != nil {
- nodeWrappers = append(nodeWrappers, t)
- }
- tpl = tpl.child
- }
-
- return nodeWrappers
-}
-
-func (node *tagBlockNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- tpl := ctx.template
- if tpl == nil {
- panic("internal error: tpl == nil")
- }
-
- // Determine the block to execute
- blockWrappers := node.getBlockWrappers(tpl)
- lenBlockWrappers := len(blockWrappers)
-
- if lenBlockWrappers == 0 {
- return ctx.Error("internal error: len(block_wrappers) == 0 in tagBlockNode.Execute()", nil)
- }
-
- blockWrapper := blockWrappers[lenBlockWrappers-1]
- ctx.Private["block"] = tagBlockInformation{
- ctx: ctx,
- wrappers: blockWrappers[0 : lenBlockWrappers-1],
- }
- err := blockWrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-type tagBlockInformation struct {
- ctx *ExecutionContext
- wrappers []*NodeWrapper
-}
-
-func (t tagBlockInformation) Super() string {
- lenWrappers := len(t.wrappers)
-
- if lenWrappers == 0 {
- return ""
- }
-
- superCtx := NewChildExecutionContext(t.ctx)
- superCtx.Private["block"] = tagBlockInformation{
- ctx: t.ctx,
- wrappers: t.wrappers[0 : lenWrappers-1],
- }
-
- blockWrapper := t.wrappers[lenWrappers-1]
- buf := bytes.NewBufferString("")
- err := blockWrapper.Execute(superCtx, &templateWriter{buf})
- if err != nil {
- return ""
- }
- return buf.String()
-}
-
-func tagBlockParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- if arguments.Count() == 0 {
- return nil, arguments.Error("Tag 'block' requires an identifier.", nil)
- }
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("First argument for tag 'block' must be an identifier.", nil)
- }
-
- if arguments.Remaining() != 0 {
- return nil, arguments.Error("Tag 'block' takes exactly 1 argument (an identifier).", nil)
- }
-
- wrapper, endtagargs, err := doc.WrapUntilTag("endblock")
- if err != nil {
- return nil, err
- }
- if endtagargs.Remaining() > 0 {
- endtagnameToken := endtagargs.MatchType(TokenIdentifier)
- if endtagnameToken != nil {
- if endtagnameToken.Val != nameToken.Val {
- return nil, endtagargs.Error(fmt.Sprintf("Name for 'endblock' must equal to 'block'-tag's name ('%s' != '%s').",
- nameToken.Val, endtagnameToken.Val), nil)
- }
- }
-
- if endtagnameToken == nil || endtagargs.Remaining() > 0 {
- return nil, endtagargs.Error("Either no or only one argument (identifier) allowed for 'endblock'.", nil)
- }
- }
-
- tpl := doc.template
- if tpl == nil {
- panic("internal error: tpl == nil")
- }
- _, hasBlock := tpl.blocks[nameToken.Val]
- if !hasBlock {
- tpl.blocks[nameToken.Val] = wrapper
- } else {
- return nil, arguments.Error(fmt.Sprintf("Block named '%s' already defined", nameToken.Val), nil)
- }
-
- return &tagBlockNode{name: nameToken.Val}, nil
-}
-
-func init() {
- RegisterTag("block", tagBlockParser)
-}
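getBlockWrappers walks the template inheritance chain collecting one wrapper per template, executes the most-derived one, and exposes the remainder via block.Super. A file-based sketch, assuming the upstream package; the temp-dir templates are invented for illustration:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/flosch/pongo2"
)

func main() {
	dir, err := os.MkdirTemp("", "pongo2-blocks")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	base := []byte(`{% block content %}base{% endblock %}`)
	child := []byte(`{% extends "base.html" %}{% block content %}{{ block.Super }} + child{% endblock %}`)
	if err := os.WriteFile(filepath.Join(dir, "base.html"), base, 0o644); err != nil {
		panic(err)
	}
	if err := os.WriteFile(filepath.Join(dir, "child.html"), child, 0o644); err != nil {
		panic(err)
	}

	// Executing the child renders the parent, with the child's block
	// override on top; block.Super reaches the parent's block body.
	tpl := pongo2.Must(pongo2.FromFile(filepath.Join(dir, "child.html")))
	out, _ := tpl.Execute(pongo2.Context{})
	fmt.Println(out) // base + child
}
```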
diff --git a/vendor/github.com/flosch/pongo2/tags_comment.go b/vendor/github.com/flosch/pongo2/tags_comment.go
deleted file mode 100644
index 56a02ed99db4..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_comment.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package pongo2
-
-type tagCommentNode struct{}
-
-func (node *tagCommentNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- return nil
-}
-
-func tagCommentParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- commentNode := &tagCommentNode{}
-
- // TODO: Process the endtag's arguments (see django 'comment'-tag documentation)
- err := doc.SkipUntilTag("endcomment")
- if err != nil {
- return nil, err
- }
-
- if arguments.Count() != 0 {
- return nil, arguments.Error("Tag 'comment' does not take any argument.", nil)
- }
-
- return commentNode, nil
-}
-
-func init() {
- RegisterTag("comment", tagCommentParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_cycle.go b/vendor/github.com/flosch/pongo2/tags_cycle.go
deleted file mode 100644
index ffbd254eea8b..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_cycle.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package pongo2
-
-type tagCycleValue struct {
- node *tagCycleNode
- value *Value
-}
-
-type tagCycleNode struct {
- position *Token
- args []IEvaluator
- idx int
- asName string
- silent bool
-}
-
-func (cv *tagCycleValue) String() string {
- return cv.value.String()
-}
-
-func (node *tagCycleNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- item := node.args[node.idx%len(node.args)]
- node.idx++
-
- val, err := item.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if t, ok := val.Interface().(*tagCycleValue); ok {
- // {% cycle "test1" "test2"
- // {% cycle cycleitem %}
-
- // Update the cycle value with next value
- item := t.node.args[t.node.idx%len(t.node.args)]
- t.node.idx++
-
- val, err := item.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- t.value = val
-
- if !t.node.silent {
- writer.WriteString(val.String())
- }
- } else {
- // Regular call
-
- cycleValue := &tagCycleValue{
- node: node,
- value: val,
- }
-
- if node.asName != "" {
- ctx.Private[node.asName] = cycleValue
- }
- if !node.silent {
- writer.WriteString(val.String())
- }
- }
-
- return nil
-}
-
-// HINT: We're not supporting the old comma-separated list of expressions argument-style
-func tagCycleParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- cycleNode := &tagCycleNode{
- position: start,
- }
-
- for arguments.Remaining() > 0 {
- node, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- cycleNode.args = append(cycleNode.args, node)
-
- if arguments.MatchOne(TokenKeyword, "as") != nil {
- // as
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Name (identifier) expected after 'as'.", nil)
- }
- cycleNode.asName = nameToken.Val
-
- if arguments.MatchOne(TokenIdentifier, "silent") != nil {
- cycleNode.silent = true
- }
-
- // Now we're finished
- break
- }
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed cycle-tag.", nil)
- }
-
- return cycleNode, nil
-}
-
-func init() {
- RegisterTag("cycle", tagCycleParser)
-}
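The cycle node keeps its rotation index (idx) on itself, so each execution inside a loop advances the cycle. A quick sketch (upstream package assumed):

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(
		`{% for x in items %}{% cycle "odd" "even" %} {% endfor %}`))
	out, _ := tpl.Execute(pongo2.Context{"items": []int{1, 2, 3}})
	fmt.Println(out) // odd even odd
}
```

Because idx lives on the parsed node, the rotation also carries over between Execute calls on the same compiled template.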
diff --git a/vendor/github.com/flosch/pongo2/tags_extends.go b/vendor/github.com/flosch/pongo2/tags_extends.go
deleted file mode 100644
index 5771020a0661..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_extends.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package pongo2
-
-type tagExtendsNode struct {
- filename string
-}
-
-func (node *tagExtendsNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- return nil
-}
-
-func tagExtendsParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- extendsNode := &tagExtendsNode{}
-
- if doc.template.level > 1 {
- return nil, arguments.Error("The 'extends' tag can only defined on root level.", start)
- }
-
- if doc.template.parent != nil {
- // Already one parent
- return nil, arguments.Error("This template has already one parent.", start)
- }
-
- if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
- // prepared, static template
-
- // Get parent's filename
- parentFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
-
- // Parse the parent
- parentTemplate, err := doc.template.set.FromFile(parentFilename)
- if err != nil {
- return nil, err.(*Error)
- }
-
- // Keep track of things
- parentTemplate.child = doc.template
- doc.template.parent = parentTemplate
- extendsNode.filename = parentFilename
- } else {
- return nil, arguments.Error("Tag 'extends' requires a template filename as string.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Tag 'extends' does only take 1 argument.", nil)
- }
-
- return extendsNode, nil
-}
-
-func init() {
- RegisterTag("extends", tagExtendsParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_filter.go b/vendor/github.com/flosch/pongo2/tags_filter.go
deleted file mode 100644
index b38fd929821d..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_filter.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package pongo2
-
-import (
- "bytes"
-)
-
-type nodeFilterCall struct {
- name string
- paramExpr IEvaluator
-}
-
-type tagFilterNode struct {
- position *Token
- bodyWrapper *NodeWrapper
- filterChain []*nodeFilterCall
-}
-
-func (node *tagFilterNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- temp := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB size
-
- err := node.bodyWrapper.Execute(ctx, temp)
- if err != nil {
- return err
- }
-
- value := AsValue(temp.String())
-
- for _, call := range node.filterChain {
- var param *Value
- if call.paramExpr != nil {
- param, err = call.paramExpr.Evaluate(ctx)
- if err != nil {
- return err
- }
- } else {
- param = AsValue(nil)
- }
- value, err = ApplyFilter(call.name, value, param)
- if err != nil {
- return ctx.Error(err.Error(), node.position)
- }
- }
-
- writer.WriteString(value.String())
-
- return nil
-}
-
-func tagFilterParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- filterNode := &tagFilterNode{
- position: start,
- }
-
- wrapper, _, err := doc.WrapUntilTag("endfilter")
- if err != nil {
- return nil, err
- }
- filterNode.bodyWrapper = wrapper
-
- for arguments.Remaining() > 0 {
- filterCall := &nodeFilterCall{}
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Expected a filter name (identifier).", nil)
- }
- filterCall.name = nameToken.Val
-
- if arguments.MatchOne(TokenSymbol, ":") != nil {
- // Filter parameter
- // NOTICE: we can't use ParseExpression() here, because it would parse the next filter "|..." as well in the argument list
- expr, err := arguments.parseVariableOrLiteral()
- if err != nil {
- return nil, err
- }
- filterCall.paramExpr = expr
- }
-
- filterNode.filterChain = append(filterNode.filterChain, filterCall)
-
- if arguments.MatchOne(TokenSymbol, "|") == nil {
- break
- }
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed filter-tag arguments.", nil)
- }
-
- return filterNode, nil
-}
-
-func init() {
- RegisterTag("filter", tagFilterParser)
-}
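The filter tag renders its body into a buffer and then pushes the result through each filter in the chain; note the parser deliberately uses parseVariableOrLiteral for parameters so a following "|" still separates filters. A sketch with two built-in filters (upstream package assumed):

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// The body is rendered first, then piped through lower and capfirst.
	tpl := pongo2.Must(pongo2.FromString(
		`{% filter lower|capfirst %}HELLO {{ name }}{% endfilter %}`))
	out, _ := tpl.Execute(pongo2.Context{"name": "WORLD"})
	fmt.Println(out) // Hello world
}
```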
diff --git a/vendor/github.com/flosch/pongo2/tags_firstof.go b/vendor/github.com/flosch/pongo2/tags_firstof.go
deleted file mode 100644
index 5b2888e2be5d..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_firstof.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package pongo2
-
-type tagFirstofNode struct {
- position *Token
- args []IEvaluator
-}
-
-func (node *tagFirstofNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for _, arg := range node.args {
- val, err := arg.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if val.IsTrue() {
- if ctx.Autoescape && !arg.FilterApplied("safe") {
- val, err = ApplyFilter("escape", val, nil)
- if err != nil {
- return err
- }
- }
-
- writer.WriteString(val.String())
- return nil
- }
- }
-
- return nil
-}
-
-func tagFirstofParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- firstofNode := &tagFirstofNode{
- position: start,
- }
-
- for arguments.Remaining() > 0 {
- node, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- firstofNode.args = append(firstofNode.args, node)
- }
-
- return firstofNode, nil
-}
-
-func init() {
- RegisterTag("firstof", tagFirstofParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_for.go b/vendor/github.com/flosch/pongo2/tags_for.go
deleted file mode 100644
index 5b0b5554c83f..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_for.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package pongo2
-
-type tagForNode struct {
- key string
- value string // only for maps: for key, value in map
- objectEvaluator IEvaluator
- reversed bool
- sorted bool
-
- bodyWrapper *NodeWrapper
- emptyWrapper *NodeWrapper
-}
-
-type tagForLoopInformation struct {
- Counter int
- Counter0 int
- Revcounter int
- Revcounter0 int
- First bool
- Last bool
- Parentloop *tagForLoopInformation
-}
-
-func (node *tagForNode) Execute(ctx *ExecutionContext, writer TemplateWriter) (forError *Error) {
- // Backup forloop (as parentloop in public context), key-name and value-name
- forCtx := NewChildExecutionContext(ctx)
- parentloop := forCtx.Private["forloop"]
-
- // Create loop struct
- loopInfo := &tagForLoopInformation{
- First: true,
- }
-
- // Is it a loop in a loop?
- if parentloop != nil {
- loopInfo.Parentloop = parentloop.(*tagForLoopInformation)
- }
-
- // Register loopInfo in public context
- forCtx.Private["forloop"] = loopInfo
-
- obj, err := node.objectEvaluator.Evaluate(forCtx)
- if err != nil {
- return err
- }
-
- obj.IterateOrder(func(idx, count int, key, value *Value) bool {
- // There's something to iterate over (correct type and at least 1 item)
-
- // Update loop infos and public context
- forCtx.Private[node.key] = key
- if value != nil {
- forCtx.Private[node.value] = value
- }
- loopInfo.Counter = idx + 1
- loopInfo.Counter0 = idx
- if idx == 1 {
- loopInfo.First = false
- }
- if idx+1 == count {
- loopInfo.Last = true
- }
- loopInfo.Revcounter = count - idx // TODO: Not sure about this, have to look it up
- loopInfo.Revcounter0 = count - (idx + 1) // TODO: Not sure about this, have to look it up
-
- // Render elements with updated context
- err := node.bodyWrapper.Execute(forCtx, writer)
- if err != nil {
- forError = err
- return false
- }
- return true
- }, func() {
- // Nothing to iterate over (maybe wrong type or no items)
- if node.emptyWrapper != nil {
- err := node.emptyWrapper.Execute(forCtx, writer)
- if err != nil {
- forError = err
- }
- }
- }, node.reversed, node.sorted)
-
- return forError
-}
-
-func tagForParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- forNode := &tagForNode{}
-
- // Arguments parsing
- var valueToken *Token
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an key identifier as first argument for 'for'-tag", nil)
- }
-
- if arguments.Match(TokenSymbol, ",") != nil {
- // Value name is provided
- valueToken = arguments.MatchType(TokenIdentifier)
- if valueToken == nil {
- return nil, arguments.Error("Value name must be an identifier.", nil)
- }
- }
-
- if arguments.Match(TokenKeyword, "in") == nil {
- return nil, arguments.Error("Expected keyword 'in'.", nil)
- }
-
- objectEvaluator, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- forNode.objectEvaluator = objectEvaluator
- forNode.key = keyToken.Val
- if valueToken != nil {
- forNode.value = valueToken.Val
- }
-
- if arguments.MatchOne(TokenIdentifier, "reversed") != nil {
- forNode.reversed = true
- }
-
- if arguments.MatchOne(TokenIdentifier, "sorted") != nil {
- forNode.sorted = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed for-loop arguments.", nil)
- }
-
- // Body wrapping
- wrapper, endargs, err := doc.WrapUntilTag("empty", "endfor")
- if err != nil {
- return nil, err
- }
- forNode.bodyWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "empty" {
- // if there's an {% empty %} block in the for-statement, we need that block as well
- wrapper, endargs, err = doc.WrapUntilTag("endfor")
- if err != nil {
- return nil, err
- }
- forNode.emptyWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return forNode, nil
-}
-
-func init() {
- RegisterTag("for", tagForParser)
-}
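Execute publishes a tagForLoopInformation value under forloop in a child context, giving templates Counter, Counter0, Revcounter, First, Last, and Parentloop, while {% empty %} supplies the no-items branch. A sketch (upstream package assumed):

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(
		`{% for item in items %}{{ forloop.Counter }}/{{ forloop.Revcounter }}:{{ item }}{% if not forloop.Last %}, {% endif %}{% empty %}none{% endfor %}`))

	out, _ := tpl.Execute(pongo2.Context{"items": []string{"a", "b"}})
	fmt.Println(out) // 1/2:a, 2/1:b

	// With nothing to iterate, the {% empty %} wrapper runs instead.
	out, _ = tpl.Execute(pongo2.Context{"items": []string{}})
	fmt.Println(out) // none
}
```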
diff --git a/vendor/github.com/flosch/pongo2/tags_if.go b/vendor/github.com/flosch/pongo2/tags_if.go
deleted file mode 100644
index 3eeaf3b49983..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_if.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package pongo2
-
-type tagIfNode struct {
- conditions []IEvaluator
- wrappers []*NodeWrapper
-}
-
-func (node *tagIfNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for i, condition := range node.conditions {
- result, err := condition.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if result.IsTrue() {
- return node.wrappers[i].Execute(ctx, writer)
- }
- // Last condition?
- if len(node.conditions) == i+1 && len(node.wrappers) > i+1 {
- return node.wrappers[i+1].Execute(ctx, writer)
- }
- }
- return nil
-}
-
-func tagIfParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifNode := &tagIfNode{}
-
- // Parse first and main IF condition
- condition, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifNode.conditions = append(ifNode.conditions, condition)
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("If-condition is malformed.", nil)
- }
-
- // Check the rest
- for {
- wrapper, tagArgs, err := doc.WrapUntilTag("elif", "else", "endif")
- if err != nil {
- return nil, err
- }
- ifNode.wrappers = append(ifNode.wrappers, wrapper)
-
- if wrapper.Endtag == "elif" {
- // elif can take a condition
- condition, err = tagArgs.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifNode.conditions = append(ifNode.conditions, condition)
-
- if tagArgs.Remaining() > 0 {
- return nil, tagArgs.Error("Elif-condition is malformed.", nil)
- }
- } else {
- if tagArgs.Count() > 0 {
- // else/endif can't take any conditions
- return nil, tagArgs.Error("Arguments not allowed here.", nil)
- }
- }
-
- if wrapper.Endtag == "endif" {
- break
- }
- }
-
- return ifNode, nil
-}
-
-func init() {
- RegisterTag("if", tagIfParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifchanged.go b/vendor/github.com/flosch/pongo2/tags_ifchanged.go
deleted file mode 100644
index 45296a0a3459..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_ifchanged.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package pongo2
-
-import (
- "bytes"
-)
-
-type tagIfchangedNode struct {
- watchedExpr []IEvaluator
- lastValues []*Value
- lastContent []byte
- thenWrapper *NodeWrapper
- elseWrapper *NodeWrapper
-}
-
-func (node *tagIfchangedNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- if len(node.watchedExpr) == 0 {
- // Check against own rendered body
-
- buf := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
- err := node.thenWrapper.Execute(ctx, buf)
- if err != nil {
- return err
- }
-
- bufBytes := buf.Bytes()
- if !bytes.Equal(node.lastContent, bufBytes) {
- // Rendered content changed, output it
- writer.Write(bufBytes)
- node.lastContent = bufBytes
- }
- } else {
- nowValues := make([]*Value, 0, len(node.watchedExpr))
- for _, expr := range node.watchedExpr {
- val, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- nowValues = append(nowValues, val)
- }
-
- // Compare old to new values now
- changed := len(node.lastValues) == 0
-
- for idx, oldVal := range node.lastValues {
- if !oldVal.EqualValueTo(nowValues[idx]) {
- changed = true
- break // we can stop here because ONE value changed
- }
- }
-
- node.lastValues = nowValues
-
- if changed {
- // Render thenWrapper
- err := node.thenWrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
- } else {
- // Render elseWrapper
- err := node.elseWrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func tagIfchangedParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifchangedNode := &tagIfchangedNode{}
-
- for arguments.Remaining() > 0 {
- // Parse condition
- expr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifchangedNode.watchedExpr = append(ifchangedNode.watchedExpr, expr)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Ifchanged-arguments are malformed.", nil)
- }
-
- // Wrap then/else-blocks
- wrapper, endargs, err := doc.WrapUntilTag("else", "endifchanged")
- if err != nil {
- return nil, err
- }
- ifchangedNode.thenWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "else" {
- // if there's an else in the ifchanged-statement, we need the else-block as well
- wrapper, endargs, err = doc.WrapUntilTag("endifchanged")
- if err != nil {
- return nil, err
- }
- ifchangedNode.elseWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return ifchangedNode, nil
-}
-
-func init() {
- RegisterTag("ifchanged", tagIfchangedParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifequal.go b/vendor/github.com/flosch/pongo2/tags_ifequal.go
deleted file mode 100644
index 103f1c7ba6ea..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_ifequal.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package pongo2
-
-type tagIfEqualNode struct {
- var1, var2 IEvaluator
- thenWrapper *NodeWrapper
- elseWrapper *NodeWrapper
-}
-
-func (node *tagIfEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- r1, err := node.var1.Evaluate(ctx)
- if err != nil {
- return err
- }
- r2, err := node.var2.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- result := r1.EqualValueTo(r2)
-
- if result {
- return node.thenWrapper.Execute(ctx, writer)
- }
- if node.elseWrapper != nil {
- return node.elseWrapper.Execute(ctx, writer)
- }
- return nil
-}
-
-func tagIfEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifequalNode := &tagIfEqualNode{}
-
- // Parse two expressions
- var1, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- var2, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifequalNode.var1 = var1
- ifequalNode.var2 = var2
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
- }
-
- // Wrap then/else-blocks
- wrapper, endargs, err := doc.WrapUntilTag("else", "endifequal")
- if err != nil {
- return nil, err
- }
- ifequalNode.thenWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "else" {
- // if there's an else in the ifequal-statement, we need the else-block as well
- wrapper, endargs, err = doc.WrapUntilTag("endifequal")
- if err != nil {
- return nil, err
- }
- ifequalNode.elseWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return ifequalNode, nil
-}
-
-func init() {
- RegisterTag("ifequal", tagIfEqualParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_ifnotequal.go b/vendor/github.com/flosch/pongo2/tags_ifnotequal.go
deleted file mode 100644
index 0d287d349d00..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_ifnotequal.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package pongo2
-
-type tagIfNotEqualNode struct {
- var1, var2 IEvaluator
- thenWrapper *NodeWrapper
- elseWrapper *NodeWrapper
-}
-
-func (node *tagIfNotEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- r1, err := node.var1.Evaluate(ctx)
- if err != nil {
- return err
- }
- r2, err := node.var2.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- result := !r1.EqualValueTo(r2)
-
- if result {
- return node.thenWrapper.Execute(ctx, writer)
- }
- if node.elseWrapper != nil {
- return node.elseWrapper.Execute(ctx, writer)
- }
- return nil
-}
-
-func tagIfNotEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifnotequalNode := &tagIfNotEqualNode{}
-
- // Parse two expressions
- var1, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- var2, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifnotequalNode.var1 = var1
- ifnotequalNode.var2 = var2
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
- }
-
- // Wrap then/else-blocks
- wrapper, endargs, err := doc.WrapUntilTag("else", "endifnotequal")
- if err != nil {
- return nil, err
- }
- ifnotequalNode.thenWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "else" {
- // if there's an else in the ifnotequal-statement, we need the else-block as well
- wrapper, endargs, err = doc.WrapUntilTag("endifnotequal")
- if err != nil {
- return nil, err
- }
- ifnotequalNode.elseWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return ifnotequalNode, nil
-}
-
-func init() {
- RegisterTag("ifnotequal", tagIfNotEqualParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_import.go b/vendor/github.com/flosch/pongo2/tags_import.go
deleted file mode 100644
index 7e0d6a01a51f..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_import.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package pongo2
-
-import (
- "fmt"
-)
-
-type tagImportNode struct {
- position *Token
- filename string
- macros map[string]*tagMacroNode // alias/name -> macro instance
-}
-
-func (node *tagImportNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for name, macro := range node.macros {
- func(name string, macro *tagMacroNode) {
- ctx.Private[name] = func(args ...*Value) *Value {
- return macro.call(ctx, args...)
- }
- }(name, macro)
- }
- return nil
-}
-
-func tagImportParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- importNode := &tagImportNode{
- position: start,
- macros: make(map[string]*tagMacroNode),
- }
-
- filenameToken := arguments.MatchType(TokenString)
- if filenameToken == nil {
- return nil, arguments.Error("Import-tag needs a filename as string.", nil)
- }
-
- importNode.filename = doc.template.set.resolveFilename(doc.template, filenameToken.Val)
-
- if arguments.Remaining() == 0 {
- return nil, arguments.Error("You must at least specify one macro to import.", nil)
- }
-
- // Compile the given template
- tpl, err := doc.template.set.FromFile(importNode.filename)
- if err != nil {
- return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, start)
- }
-
- for arguments.Remaining() > 0 {
- macroNameToken := arguments.MatchType(TokenIdentifier)
- if macroNameToken == nil {
- return nil, arguments.Error("Expected macro name (identifier).", nil)
- }
-
- asName := macroNameToken.Val
- if arguments.Match(TokenKeyword, "as") != nil {
- aliasToken := arguments.MatchType(TokenIdentifier)
- if aliasToken == nil {
- return nil, arguments.Error("Expected macro alias name (identifier).", nil)
- }
- asName = aliasToken.Val
- }
-
- macroInstance, has := tpl.exportedMacros[macroNameToken.Val]
- if !has {
- return nil, arguments.Error(fmt.Sprintf("Macro '%s' not found (or not exported) in '%s'.", macroNameToken.Val,
- importNode.filename), macroNameToken)
- }
-
- importNode.macros[asName] = macroInstance
-
- if arguments.Remaining() == 0 {
- break
- }
-
- if arguments.Match(TokenSymbol, ",") == nil {
- return nil, arguments.Error("Expected ','.", nil)
- }
- }
-
- return importNode, nil
-}
-
-func init() {
- RegisterTag("import", tagImportParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/tags_include.go b/vendor/github.com/flosch/pongo2/tags_include.go
deleted file mode 100644
index 6d619fdabebc..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_include.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package pongo2
-
-type tagIncludeNode struct {
- tpl *Template
- filenameEvaluator IEvaluator
- lazy bool
- only bool
- filename string
- withPairs map[string]IEvaluator
- ifExists bool
-}
-
-func (node *tagIncludeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- // Building the context for the template
- includeCtx := make(Context)
-
- // Fill the context with all data from the parent
- if !node.only {
- includeCtx.Update(ctx.Public)
- includeCtx.Update(ctx.Private)
- }
-
- // Put all custom with-pairs into the context
- for key, value := range node.withPairs {
- val, err := value.Evaluate(ctx)
- if err != nil {
- return err
- }
- includeCtx[key] = val
- }
-
- // Execute the template
- if node.lazy {
- // Evaluate the filename
- filename, err := node.filenameEvaluator.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if filename.String() == "" {
- return ctx.Error("Filename for 'include'-tag evaluated to an empty string.", nil)
- }
-
- // Get include-filename
- includedFilename := ctx.template.set.resolveFilename(ctx.template, filename.String())
-
- includedTpl, err2 := ctx.template.set.FromFile(includedFilename)
- if err2 != nil {
- // if this is ReadFile error, and "if_exists" flag is enabled
- if node.ifExists && err2.(*Error).Sender == "fromfile" {
- return nil
- }
- return err2.(*Error)
- }
- err2 = includedTpl.ExecuteWriter(includeCtx, writer)
- if err2 != nil {
- return err2.(*Error)
- }
- return nil
- }
- // Template is already parsed with static filename
- err := node.tpl.ExecuteWriter(includeCtx, writer)
- if err != nil {
- return err.(*Error)
- }
- return nil
-}
-
-type tagIncludeEmptyNode struct{}
-
-func (node *tagIncludeEmptyNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- return nil
-}
-
-func tagIncludeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- includeNode := &tagIncludeNode{
- withPairs: make(map[string]IEvaluator),
- }
-
- if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
- // prepared, static template
-
- // "if_exists" flag
- ifExists := arguments.Match(TokenIdentifier, "if_exists") != nil
-
- // Get include-filename
- includedFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
-
- // Parse the parent
- includeNode.filename = includedFilename
- includedTpl, err := doc.template.set.FromFile(includedFilename)
- if err != nil {
- // if this is ReadFile error, and "if_exists" token presents we should create and empty node
- if err.(*Error).Sender == "fromfile" && ifExists {
- return &tagIncludeEmptyNode{}, nil
- }
- return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, filenameToken)
- }
- includeNode.tpl = includedTpl
- } else {
- // No String, then the user wants to use lazy-evaluation (slower, but possible)
- filenameEvaluator, err := arguments.ParseExpression()
- if err != nil {
- return nil, err.updateFromTokenIfNeeded(doc.template, filenameToken)
- }
- includeNode.filenameEvaluator = filenameEvaluator
- includeNode.lazy = true
- includeNode.ifExists = arguments.Match(TokenIdentifier, "if_exists") != nil // "if_exists" flag
- }
-
- // After having parsed the filename we're gonna parse the with+only options
- if arguments.Match(TokenIdentifier, "with") != nil {
- for arguments.Remaining() > 0 {
- // We have at least one key=expr pair (because of starting "with")
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an identifier", nil)
- }
- if arguments.Match(TokenSymbol, "=") == nil {
- return nil, arguments.Error("Expected '='.", nil)
- }
- valueExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err.updateFromTokenIfNeeded(doc.template, keyToken)
- }
-
- includeNode.withPairs[keyToken.Val] = valueExpr
-
- // Only?
- if arguments.Match(TokenIdentifier, "only") != nil {
- includeNode.only = true
- break // stop parsing arguments because it's the last option
- }
- }
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed 'include'-tag arguments.", nil)
- }
-
- return includeNode, nil
-}
-
-func init() {
- RegisterTag("include", tagIncludeParser)
-}
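A string-literal filename is compiled at parse time, anything else flips the node into lazy mode, and if_exists downgrades a missing file to a no-op in both paths. A sketch, assuming the upstream package; the included path is hypothetical and deliberately absent:

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// Because "partials/header.html" does not exist, if_exists makes the
	// parser substitute tagIncludeEmptyNode instead of failing FromString.
	tpl := pongo2.Must(pongo2.FromString(
		`{% include "partials/header.html" if_exists %}body`))
	out, _ := tpl.Execute(pongo2.Context{})
	fmt.Println(out) // body
}
```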
diff --git a/vendor/github.com/flosch/pongo2/tags_lorem.go b/vendor/github.com/flosch/pongo2/tags_lorem.go
deleted file mode 100644
index 1d353f267dd2..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_lorem.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package pongo2
-
-import (
- "math/rand"
- "strings"
- "time"
-
- "github.com/juju/errors"
-)
-
-var (
- tagLoremParagraphs = strings.Split(tagLoremText, "\n")
- tagLoremWords = strings.Fields(tagLoremText)
-)
-
-type tagLoremNode struct {
- position *Token
- count int // number of paragraphs
- method string // w = words, p = HTML paragraphs, b = plain-text (default is b)
- random bool // does not use the default paragraph "Lorem ipsum dolor sit amet, ..."
-}
-
-func (node *tagLoremNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- switch node.method {
- case "b":
- if node.random {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
- writer.WriteString(par)
- }
- } else {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
- writer.WriteString(par)
- }
- }
- case "w":
- if node.random {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString(" ")
- }
- word := tagLoremWords[rand.Intn(len(tagLoremWords))]
- writer.WriteString(word)
- }
- } else {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString(" ")
- }
- word := tagLoremWords[i%len(tagLoremWords)]
- writer.WriteString(word)
- }
- }
- case "p":
- if node.random {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- writer.WriteString("")
- par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
- writer.WriteString(par)
- writer.WriteString("
")
- }
- } else {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- writer.WriteString("")
- par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
- writer.WriteString(par)
- writer.WriteString("
")
-
- }
- }
- default:
- return ctx.OrigError(errors.Errorf("unsupported method: %s", node.method), nil)
- }
-
- return nil
-}
-
-func tagLoremParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- loremNode := &tagLoremNode{
- position: start,
- count: 1,
- method: "b",
- }
-
- if countToken := arguments.MatchType(TokenNumber); countToken != nil {
- loremNode.count = AsValue(countToken.Val).Integer()
- }
-
- if methodToken := arguments.MatchType(TokenIdentifier); methodToken != nil {
- if methodToken.Val != "w" && methodToken.Val != "p" && methodToken.Val != "b" {
- return nil, arguments.Error("lorem-method must be either 'w', 'p' or 'b'.", nil)
- }
-
- loremNode.method = methodToken.Val
- }
-
- if arguments.MatchOne(TokenIdentifier, "random") != nil {
- loremNode.random = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed lorem-tag arguments.", nil)
- }
-
- return loremNode, nil
-}
-
-func init() {
- rand.Seed(time.Now().Unix())
-
- RegisterTag("lorem", tagLoremParser)
-}
-
-const tagLoremText = `Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
-Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.
-Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi.
-Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.
-Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis.
-At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, At accusam aliquyam diam diam dolore dolores duo eirmod eos erat, et nonumy sed tempor et et invidunt justo labore Stet clita ea et gubergren, kasd magna no rebum. sanctus sea sed takimata ut vero voluptua. est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat.
-Consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.`
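For reviewers unfamiliar with the vendored code being dropped: a minimal sketch of driving the `lorem` tag above through the public pongo2 API. Illustrative only, not part of this diff; the exact output depends on the node's Execute method, which sits earlier in this file.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// Count and method ('w' = words, 'p' = paragraphs, 'b' = blocks)
	// match the argument parsing in tagLoremParser above.
	tpl := pongo2.Must(pongo2.FromString(`{% lorem 2 w %}`))
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // e.g. "Lorem ipsum"
}
```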
diff --git a/vendor/github.com/flosch/pongo2/tags_macro.go b/vendor/github.com/flosch/pongo2/tags_macro.go
deleted file mode 100644
index dd3e0bf48a35..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_macro.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "fmt"
-)
-
-type tagMacroNode struct {
- position *Token
- name string
- argsOrder []string
- args map[string]IEvaluator
- exported bool
-
- wrapper *NodeWrapper
-}
-
-func (node *tagMacroNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- ctx.Private[node.name] = func(args ...*Value) *Value {
- return node.call(ctx, args...)
- }
-
- return nil
-}
-
-func (node *tagMacroNode) call(ctx *ExecutionContext, args ...*Value) *Value {
- argsCtx := make(Context)
-
- for k, v := range node.args {
- if v == nil {
-   // User did not provide a default value
- argsCtx[k] = nil
- } else {
- // Evaluate the default value
- valueExpr, err := v.Evaluate(ctx)
- if err != nil {
- ctx.Logf(err.Error())
- return AsSafeValue(err.Error())
- }
-
- argsCtx[k] = valueExpr
- }
- }
-
- if len(args) > len(node.argsOrder) {
-  // Too many arguments; we log the error in debug mode and return it as a safe value.
- err := ctx.Error(fmt.Sprintf("Macro '%s' called with too many arguments (%d instead of %d).",
- node.name, len(args), len(node.argsOrder)), nil).updateFromTokenIfNeeded(ctx.template, node.position)
-
- ctx.Logf(err.Error()) // TODO: This is a workaround, because the error is not returned yet to the Execution()-methods
- return AsSafeValue(err.Error())
- }
-
- // Make a context for the macro execution
- macroCtx := NewChildExecutionContext(ctx)
-
- // Register all arguments in the private context
- macroCtx.Private.Update(argsCtx)
-
- for idx, argValue := range args {
- macroCtx.Private[node.argsOrder[idx]] = argValue.Interface()
- }
-
- var b bytes.Buffer
- err := node.wrapper.Execute(macroCtx, &b)
- if err != nil {
- return AsSafeValue(err.updateFromTokenIfNeeded(ctx.template, node.position).Error())
- }
-
- return AsSafeValue(b.String())
-}
-
-func tagMacroParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- macroNode := &tagMacroNode{
- position: start,
- args: make(map[string]IEvaluator),
- }
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Macro-tag needs at least an identifier as name.", nil)
- }
- macroNode.name = nameToken.Val
-
- if arguments.MatchOne(TokenSymbol, "(") == nil {
- return nil, arguments.Error("Expected '('.", nil)
- }
-
- for arguments.Match(TokenSymbol, ")") == nil {
- argNameToken := arguments.MatchType(TokenIdentifier)
- if argNameToken == nil {
- return nil, arguments.Error("Expected argument name as identifier.", nil)
- }
- macroNode.argsOrder = append(macroNode.argsOrder, argNameToken.Val)
-
- if arguments.Match(TokenSymbol, "=") != nil {
- // Default expression follows
- argDefaultExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- macroNode.args[argNameToken.Val] = argDefaultExpr
- } else {
- // No default expression
- macroNode.args[argNameToken.Val] = nil
- }
-
- if arguments.Match(TokenSymbol, ")") != nil {
- break
- }
- if arguments.Match(TokenSymbol, ",") == nil {
- return nil, arguments.Error("Expected ',' or ')'.", nil)
- }
- }
-
- if arguments.Match(TokenKeyword, "export") != nil {
- macroNode.exported = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed macro-tag.", nil)
- }
-
- // Body wrapping
- wrapper, endargs, err := doc.WrapUntilTag("endmacro")
- if err != nil {
- return nil, err
- }
- macroNode.wrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if macroNode.exported {
- // Now register the macro if it wants to be exported
- _, has := doc.template.exportedMacros[macroNode.name]
- if has {
- return nil, doc.Error(fmt.Sprintf("another macro with name '%s' already exported", macroNode.name), start)
- }
- doc.template.exportedMacros[macroNode.name] = macroNode
- }
-
- return macroNode, nil
-}
-
-func init() {
- RegisterTag("macro", tagMacroParser)
-}
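A hedged sketch of macro usage against the parser above (illustrative, not part of this change): a macro registers itself as a callable in the template's private context, and default argument expressions are honored.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// 'greeting' gets a default expression, as parsed by tagMacroParser.
	tpl := pongo2.Must(pongo2.FromString(
		`{% macro greet(name, greeting="Hello") %}{{ greeting }}, {{ name }}!{% endmacro %}` +
			`{{ greet("World") }}`))
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Hello, World!
}
```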
diff --git a/vendor/github.com/flosch/pongo2/tags_now.go b/vendor/github.com/flosch/pongo2/tags_now.go
deleted file mode 100644
index d9fa4a371134..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_now.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package pongo2
-
-import (
- "time"
-)
-
-type tagNowNode struct {
- position *Token
- format string
- fake bool
-}
-
-func (node *tagNowNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- var t time.Time
- if node.fake {
- t = time.Date(2014, time.February, 05, 18, 31, 45, 00, time.UTC)
- } else {
- t = time.Now()
- }
-
- writer.WriteString(t.Format(node.format))
-
- return nil
-}
-
-func tagNowParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- nowNode := &tagNowNode{
- position: start,
- }
-
- formatToken := arguments.MatchType(TokenString)
- if formatToken == nil {
- return nil, arguments.Error("Expected a format string.", nil)
- }
- nowNode.format = formatToken.Val
-
- if arguments.MatchOne(TokenIdentifier, "fake") != nil {
- nowNode.fake = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed now-tag arguments.", nil)
- }
-
- return nowNode, nil
-}
-
-func init() {
- RegisterTag("now", tagNowParser)
-}
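Illustrative use of the `now` tag (not part of the diff): the format string is a Go reference-time layout, and the optional `fake` identifier pins the clock to 2014-02-05 18:31:45 UTC, which is handy for tests.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(`{% now "2006-01-02 15:04" fake %}`))
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 2014-02-05 18:31
}
```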
diff --git a/vendor/github.com/flosch/pongo2/tags_set.go b/vendor/github.com/flosch/pongo2/tags_set.go
deleted file mode 100644
index be121c12ac4f..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_set.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package pongo2
-
-type tagSetNode struct {
- name string
- expression IEvaluator
-}
-
-func (node *tagSetNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- // Evaluate expression
- value, err := node.expression.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- ctx.Private[node.name] = value
- return nil
-}
-
-func tagSetParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- node := &tagSetNode{}
-
- // Parse variable name
- typeToken := arguments.MatchType(TokenIdentifier)
- if typeToken == nil {
- return nil, arguments.Error("Expected an identifier.", nil)
- }
- node.name = typeToken.Val
-
- if arguments.Match(TokenSymbol, "=") == nil {
- return nil, arguments.Error("Expected '='.", nil)
- }
-
- // Variable expression
- keyExpression, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- node.expression = keyExpression
-
- // Remaining arguments
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed 'set'-tag arguments.", nil)
- }
-
- return node, nil
-}
-
-func init() {
- RegisterTag("set", tagSetParser)
-}
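For context, a minimal `set` tag sketch (illustrative only): the tag evaluates the expression and binds it into the private context, so the variable is visible for the rest of the template.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(`{% set answer = 42 %}The answer is {{ answer }}.`))
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // The answer is 42.
}
```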
diff --git a/vendor/github.com/flosch/pongo2/tags_spaceless.go b/vendor/github.com/flosch/pongo2/tags_spaceless.go
deleted file mode 100644
index 4fa851ba45ba..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_spaceless.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "regexp"
-)
-
-type tagSpacelessNode struct {
- wrapper *NodeWrapper
-}
-
-var tagSpacelessRegexp = regexp.MustCompile(`(?U:(<.*>))([\t\n\v\f\r ]+)(?U:(<.*>))`)
-
-func (node *tagSpacelessNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- b := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
-
- err := node.wrapper.Execute(ctx, b)
- if err != nil {
- return err
- }
-
- s := b.String()
- // Repeat until no further replacements occur
- changed := true
- for changed {
- s2 := tagSpacelessRegexp.ReplaceAllString(s, "$1$3")
- changed = s != s2
- s = s2
- }
-
- writer.WriteString(s)
-
- return nil
-}
-
-func tagSpacelessParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- spacelessNode := &tagSpacelessNode{}
-
- wrapper, _, err := doc.WrapUntilTag("endspaceless")
- if err != nil {
- return nil, err
- }
- spacelessNode.wrapper = wrapper
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed spaceless-tag arguments.", nil)
- }
-
- return spacelessNode, nil
-}
-
-func init() {
- RegisterTag("spaceless", tagSpacelessParser)
-}
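Illustrative `spaceless` usage (not part of the diff): the regexp above strips whitespace between adjacent HTML tags, applied repeatedly until the output stops changing.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(`{% spaceless %}<p> <b>k3s</b> </p>{% endspaceless %}`))
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // <p><b>k3s</b></p>
}
```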
diff --git a/vendor/github.com/flosch/pongo2/tags_ssi.go b/vendor/github.com/flosch/pongo2/tags_ssi.go
deleted file mode 100644
index c33858d5f1bd..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_ssi.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package pongo2
-
-import (
- "io/ioutil"
-)
-
-type tagSSINode struct {
- filename string
- content string
- template *Template
-}
-
-func (node *tagSSINode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- if node.template != nil {
- // Execute the template within the current context
- includeCtx := make(Context)
- includeCtx.Update(ctx.Public)
- includeCtx.Update(ctx.Private)
-
- err := node.template.execute(includeCtx, writer)
- if err != nil {
- return err.(*Error)
- }
- } else {
- // Just print out the content
- writer.WriteString(node.content)
- }
- return nil
-}
-
-func tagSSIParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- SSINode := &tagSSINode{}
-
- if fileToken := arguments.MatchType(TokenString); fileToken != nil {
- SSINode.filename = fileToken.Val
-
- if arguments.Match(TokenIdentifier, "parsed") != nil {
- // parsed
- temporaryTpl, err := doc.template.set.FromFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
- if err != nil {
- return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, fileToken)
- }
- SSINode.template = temporaryTpl
- } else {
- // plaintext
- buf, err := ioutil.ReadFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
- if err != nil {
- return nil, (&Error{
- Sender: "tag:ssi",
- OrigError: err,
- }).updateFromTokenIfNeeded(doc.template, fileToken)
- }
- SSINode.content = string(buf)
- }
- } else {
- return nil, arguments.Error("First argument must be a string.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed SSI-tag argument.", nil)
- }
-
- return SSINode, nil
-}
-
-func init() {
- RegisterTag("ssi", tagSSIParser)
-}
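A sketch of the `ssi` tag (illustrative; the file and directory names below are made up): without `parsed` the file is inlined verbatim at parse time, with `parsed` it is compiled as a template first.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/flosch/pongo2"
)

func main() {
	dir, _ := ioutil.TempDir("", "ssi-example")
	defer os.RemoveAll(dir)
	path := filepath.Join(dir, "snippet.txt")
	ioutil.WriteFile(path, []byte("included content"), 0644)

	tpl := pongo2.Must(pongo2.FromString(`{% ssi "` + path + `" %}`))
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // included content
}
```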
diff --git a/vendor/github.com/flosch/pongo2/tags_templatetag.go b/vendor/github.com/flosch/pongo2/tags_templatetag.go
deleted file mode 100644
index 164b4dc3d07e..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_templatetag.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package pongo2
-
-type tagTemplateTagNode struct {
- content string
-}
-
-var templateTagMapping = map[string]string{
- "openblock": "{%",
- "closeblock": "%}",
- "openvariable": "{{",
- "closevariable": "}}",
- "openbrace": "{",
- "closebrace": "}",
- "opencomment": "{#",
- "closecomment": "#}",
-}
-
-func (node *tagTemplateTagNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- writer.WriteString(node.content)
- return nil
-}
-
-func tagTemplateTagParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ttNode := &tagTemplateTagNode{}
-
- if argToken := arguments.MatchType(TokenIdentifier); argToken != nil {
- output, found := templateTagMapping[argToken.Val]
- if !found {
- return nil, arguments.Error("Argument not found", argToken)
- }
- ttNode.content = output
- } else {
- return nil, arguments.Error("Identifier expected.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed templatetag-tag argument.", nil)
- }
-
- return ttNode, nil
-}
-
-func init() {
- RegisterTag("templatetag", tagTemplateTagParser)
-}
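Illustrative `templatetag` usage (not part of the diff): it emits the raw delimiter characters from the mapping above so they survive rendering.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(
		`{% templatetag openvariable %} name {% templatetag closevariable %}`))
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // {{ name }}
}
```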
diff --git a/vendor/github.com/flosch/pongo2/tags_widthratio.go b/vendor/github.com/flosch/pongo2/tags_widthratio.go
deleted file mode 100644
index 70c9c3e8af2e..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_widthratio.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "math"
-)
-
-type tagWidthratioNode struct {
- position *Token
- current, max IEvaluator
- width IEvaluator
- ctxName string
-}
-
-func (node *tagWidthratioNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- current, err := node.current.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- max, err := node.max.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- width, err := node.width.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- value := int(math.Ceil(current.Float()/max.Float()*width.Float() + 0.5))
-
- if node.ctxName == "" {
- writer.WriteString(fmt.Sprintf("%d", value))
- } else {
- ctx.Private[node.ctxName] = value
- }
-
- return nil
-}
-
-func tagWidthratioParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- widthratioNode := &tagWidthratioNode{
- position: start,
- }
-
- current, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- widthratioNode.current = current
-
- max, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- widthratioNode.max = max
-
- width, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- widthratioNode.width = width
-
- if arguments.MatchOne(TokenKeyword, "as") != nil {
- // Name follows
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Expected name (identifier).", nil)
- }
- widthratioNode.ctxName = nameToken.Val
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed widthratio-tag arguments.", nil)
- }
-
- return widthratioNode, nil
-}
-
-func init() {
- RegisterTag("widthratio", tagWidthratioParser)
-}
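A quick `widthratio` sketch (illustrative): per the Execute method above, the tag computes ceil(current/max*width + 0.5), so 175 of 200 scaled to a width of 100 yields 88; appending `as name` stores the result in the context instead of writing it.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(`{% widthratio 175 200 100 %}`))
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 88
}
```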
diff --git a/vendor/github.com/flosch/pongo2/tags_with.go b/vendor/github.com/flosch/pongo2/tags_with.go
deleted file mode 100644
index 32b3c1c42812..000000000000
--- a/vendor/github.com/flosch/pongo2/tags_with.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package pongo2
-
-type tagWithNode struct {
- withPairs map[string]IEvaluator
- wrapper *NodeWrapper
-}
-
-func (node *tagWithNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- // New child context for the block
- withctx := NewChildExecutionContext(ctx)
-
- // Put all custom with-pairs into the context
- for key, value := range node.withPairs {
- val, err := value.Evaluate(ctx)
- if err != nil {
- return err
- }
- withctx.Private[key] = val
- }
-
- return node.wrapper.Execute(withctx, writer)
-}
-
-func tagWithParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- withNode := &tagWithNode{
- withPairs: make(map[string]IEvaluator),
- }
-
- if arguments.Count() == 0 {
- return nil, arguments.Error("Tag 'with' requires at least one argument.", nil)
- }
-
- wrapper, endargs, err := doc.WrapUntilTag("endwith")
- if err != nil {
- return nil, err
- }
- withNode.wrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- // Scan through all arguments to see which style the user uses (old or new style).
- // If we find any "as" keyword we will enforce old style; otherwise we will use new style.
- oldStyle := false // by default we're using the new style
- for i := 0; i < arguments.Count(); i++ {
- if arguments.PeekN(i, TokenKeyword, "as") != nil {
- oldStyle = true
- break
- }
- }
-
- for arguments.Remaining() > 0 {
- if oldStyle {
- valueExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- if arguments.Match(TokenKeyword, "as") == nil {
- return nil, arguments.Error("Expected 'as' keyword.", nil)
- }
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an identifier", nil)
- }
- withNode.withPairs[keyToken.Val] = valueExpr
- } else {
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an identifier", nil)
- }
- if arguments.Match(TokenSymbol, "=") == nil {
- return nil, arguments.Error("Expected '='.", nil)
- }
- valueExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- withNode.withPairs[keyToken.Val] = valueExpr
- }
- }
-
- return withNode, nil
-}
-
-func init() {
- RegisterTag("with", tagWithParser)
-}
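Illustrative `with` usage (not part of the diff): both argument styles parsed above are accepted, new style `key=expr` and old style `expr as key`; the pairs only exist inside the block's child context.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(`{% with total=3 %}{{ total }} item(s){% endwith %}`))
	out, err := tpl.Execute(nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 3 item(s)
}
```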
diff --git a/vendor/github.com/flosch/pongo2/template.go b/vendor/github.com/flosch/pongo2/template.go
deleted file mode 100644
index fbe2106ffd92..000000000000
--- a/vendor/github.com/flosch/pongo2/template.go
+++ /dev/null
@@ -1,277 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "io"
- "strings"
-
- "github.com/juju/errors"
-)
-
-type TemplateWriter interface {
- io.Writer
- WriteString(string) (int, error)
-}
-
-type templateWriter struct {
- w io.Writer
-}
-
-func (tw *templateWriter) WriteString(s string) (int, error) {
- return tw.w.Write([]byte(s))
-}
-
-func (tw *templateWriter) Write(b []byte) (int, error) {
- return tw.w.Write(b)
-}
-
-type Template struct {
- set *TemplateSet
-
- // Input
- isTplString bool
- name string
- tpl string
- size int
-
- // Calculation
- tokens []*Token
- parser *Parser
-
- // first come, first served (it's important not to override existing entries in here)
- level int
- parent *Template
- child *Template
- blocks map[string]*NodeWrapper
- exportedMacros map[string]*tagMacroNode
-
- // Output
- root *nodeDocument
-
- // Options allow you to change the behavior of template-engine.
- // You can change the options before calling the Execute method.
- Options *Options
-}
-
-func newTemplateString(set *TemplateSet, tpl []byte) (*Template, error) {
- return newTemplate(set, "", true, tpl)
-}
-
-func newTemplate(set *TemplateSet, name string, isTplString bool, tpl []byte) (*Template, error) {
- strTpl := string(tpl)
-
- // Create the template
- t := &Template{
- set: set,
- isTplString: isTplString,
- name: name,
- tpl: strTpl,
- size: len(strTpl),
- blocks: make(map[string]*NodeWrapper),
- exportedMacros: make(map[string]*tagMacroNode),
- Options: newOptions(),
- }
- // Copy all settings from another Options.
- t.Options.Update(set.Options)
-
- // Tokenize it
- tokens, err := lex(name, strTpl)
- if err != nil {
- return nil, err
- }
- t.tokens = tokens
-
- // For debugging purposes, show all tokens:
- /*for i, t := range tokens {
- fmt.Printf("%3d. %s\n", i, t)
- }*/
-
- // Parse it
- err = t.parse()
- if err != nil {
- return nil, err
- }
-
- return t, nil
-}
-
-func (tpl *Template) newContextForExecution(context Context) (*Template, *ExecutionContext, error) {
- if tpl.Options.TrimBlocks || tpl.Options.LStripBlocks {
- // Issue #94 https://github.com/flosch/pongo2/issues/94
- // If an application configures pongo2 template to trim_blocks,
- // the first newline after a template tag is removed automatically (like in PHP).
- prev := &Token{
- Typ: TokenHTML,
- Val: "\n",
- }
-
- for _, t := range tpl.tokens {
- if tpl.Options.LStripBlocks {
- if prev.Typ == TokenHTML && t.Typ != TokenHTML && t.Val == "{%" {
- prev.Val = strings.TrimRight(prev.Val, "\t ")
- }
- }
-
- if tpl.Options.TrimBlocks {
- if prev.Typ != TokenHTML && t.Typ == TokenHTML && prev.Val == "%}" {
- if len(t.Val) > 0 && t.Val[0] == '\n' {
- t.Val = t.Val[1:len(t.Val)]
- }
- }
- }
-
- prev = t
- }
- }
-
- // Determine the parent to be executed (for template inheritance)
- parent := tpl
- for parent.parent != nil {
- parent = parent.parent
- }
-
- // Create context if none is given
- newContext := make(Context)
- newContext.Update(tpl.set.Globals)
-
- if context != nil {
- newContext.Update(context)
-
- if len(newContext) > 0 {
- // Check for context name syntax
- err := newContext.checkForValidIdentifiers()
- if err != nil {
- return parent, nil, err
- }
-
- // Check for clashes with macro names
- for k := range newContext {
- _, has := tpl.exportedMacros[k]
- if has {
- return parent, nil, &Error{
- Filename: tpl.name,
- Sender: "execution",
- OrigError: errors.Errorf("context key name '%s' clashes with macro '%s'", k, k),
- }
- }
- }
- }
- }
-
- // Create operational context
- ctx := newExecutionContext(parent, newContext)
-
- return parent, ctx, nil
-}
-
-func (tpl *Template) execute(context Context, writer TemplateWriter) error {
- parent, ctx, err := tpl.newContextForExecution(context)
- if err != nil {
- return err
- }
-
- // Run the selected document
- if err := parent.root.Execute(ctx, writer); err != nil {
- return err
- }
-
- return nil
-}
-
-func (tpl *Template) newTemplateWriterAndExecute(context Context, writer io.Writer) error {
- return tpl.execute(context, &templateWriter{w: writer})
-}
-
-func (tpl *Template) newBufferAndExecute(context Context) (*bytes.Buffer, error) {
- // Create output buffer
- // We assume that the rendered template will be 30% larger
- buffer := bytes.NewBuffer(make([]byte, 0, int(float64(tpl.size)*1.3)))
- if err := tpl.execute(context, buffer); err != nil {
- return nil, err
- }
- return buffer, nil
-}
-
-// ExecuteWriter executes the template with the given context and writes to
-// writer (io.Writer) on success. Context can be nil. Nothing is written on
-// error; instead the error is returned.
-func (tpl *Template) ExecuteWriter(context Context, writer io.Writer) error {
- buf, err := tpl.newBufferAndExecute(context)
- if err != nil {
- return err
- }
- _, err = buf.WriteTo(writer)
- if err != nil {
- return err
- }
- return nil
-}
-
-// ExecuteWriterUnbuffered works like ExecuteWriter. The only difference is that
-// this function might already have written parts of the generated template when
-// an execution error occurs, because there's no intermediate buffer involved for
-// performance reasons. This is handy if you need high-performance template
-// generation or if you want to manage your own pool of buffers.
-func (tpl *Template) ExecuteWriterUnbuffered(context Context, writer io.Writer) error {
- return tpl.newTemplateWriterAndExecute(context, writer)
-}
-
-// ExecuteBytes executes the template and returns the rendered template as a []byte.
-func (tpl *Template) ExecuteBytes(context Context) ([]byte, error) {
- // Execute template
- buffer, err := tpl.newBufferAndExecute(context)
- if err != nil {
- return nil, err
- }
- return buffer.Bytes(), nil
-}
-
-// Execute executes the template and returns the rendered template as a string.
-func (tpl *Template) Execute(context Context) (string, error) {
- // Execute template
- buffer, err := tpl.newBufferAndExecute(context)
- if err != nil {
- return "", err
- }
-
- return buffer.String(), nil
-
-}
-
-func (tpl *Template) ExecuteBlocks(context Context, blocks []string) (map[string]string, error) {
- var parents []*Template
- result := make(map[string]string)
-
- parent := tpl
- for parent != nil {
- parents = append(parents, parent)
- parent = parent.parent
- }
-
- for _, t := range parents {
- buffer := bytes.NewBuffer(make([]byte, 0, int(float64(t.size)*1.3)))
- _, ctx, err := t.newContextForExecution(context)
- if err != nil {
- return nil, err
- }
- for _, blockName := range blocks {
- if _, ok := result[blockName]; ok {
- continue
- }
- if blockWrapper, ok := t.blocks[blockName]; ok {
- bErr := blockWrapper.Execute(ctx, buffer)
- if bErr != nil {
- return nil, bErr
- }
- result[blockName] = buffer.String()
- buffer.Reset()
- }
- }
- // We have found all blocks
- if len(blocks) == len(result) {
- break
- }
- }
-
- return result, nil
-}
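For orientation, a sketch of the execution entry points defined above (illustrative): Execute, ExecuteBytes and ExecuteWriter all render through an intermediate buffer sized at roughly 130% of the template source, while ExecuteWriterUnbuffered writes straight to the writer.

```go
package main

import (
	"fmt"
	"os"

	"github.com/flosch/pongo2"
)

func main() {
	tpl := pongo2.Must(pongo2.FromString(`Hello {{ name }}!`))
	ctx := pongo2.Context{"name": "k3s"}

	s, _ := tpl.Execute(ctx)      // rendered as string
	b, _ := tpl.ExecuteBytes(ctx) // rendered as []byte
	fmt.Println(s, len(b))

	// Buffered: nothing reaches os.Stdout if rendering fails.
	if err := tpl.ExecuteWriter(ctx, os.Stdout); err != nil {
		panic(err)
	}
}
```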
diff --git a/vendor/github.com/flosch/pongo2/template_loader.go b/vendor/github.com/flosch/pongo2/template_loader.go
deleted file mode 100644
index bc80f4ab72fe..000000000000
--- a/vendor/github.com/flosch/pongo2/template_loader.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
-
- "github.com/juju/errors"
-)
-
-// LocalFilesystemLoader represents a local filesystem loader with basic
-// BaseDirectory capabilities. The access to the local filesystem is unrestricted.
-type LocalFilesystemLoader struct {
- baseDir string
-}
-
-// MustNewLocalFileSystemLoader creates a new LocalFilesystemLoader instance
-// and panics if there's any error during instantiation. The parameters
-// are the same as for NewLocalFileSystemLoader.
-func MustNewLocalFileSystemLoader(baseDir string) *LocalFilesystemLoader {
- fs, err := NewLocalFileSystemLoader(baseDir)
- if err != nil {
- log.Panic(err)
- }
- return fs
-}
-
-// NewLocalFileSystemLoader creates a new LocalFilesystemLoader and allows
-// templates to be loaded from disk (unrestricted). If any base directory
-// is given (or set using SetBaseDir), this base directory is used
-// for path calculation in template inclusions/imports. Otherwise the path
-// is calculated relative to the including template's path.
-func NewLocalFileSystemLoader(baseDir string) (*LocalFilesystemLoader, error) {
- fs := &LocalFilesystemLoader{}
- if baseDir != "" {
- if err := fs.SetBaseDir(baseDir); err != nil {
- return nil, err
- }
- }
- return fs, nil
-}
-
-// SetBaseDir sets the template's base directory. This directory will
-// be used for any relative path in filters, tags and From*-functions to determine
-// your template. See the comment for NewLocalFileSystemLoader as well.
-func (fs *LocalFilesystemLoader) SetBaseDir(path string) error {
- // Make the path absolute
- if !filepath.IsAbs(path) {
- abs, err := filepath.Abs(path)
- if err != nil {
- return err
- }
- path = abs
- }
-
- // Check for existence
- fi, err := os.Stat(path)
- if err != nil {
- return err
- }
- if !fi.IsDir() {
- return errors.Errorf("The given path '%s' is not a directory.", path)
- }
-
- fs.baseDir = path
- return nil
-}
-
-// Get reads the path's content from your local filesystem.
-func (fs *LocalFilesystemLoader) Get(path string) (io.Reader, error) {
- buf, err := ioutil.ReadFile(path)
- if err != nil {
- return nil, err
- }
- return bytes.NewReader(buf), nil
-}
-
-// Abs resolves a filename relative to the base directory. Absolute paths are allowed.
-// When there's no base dir set, the absolute path to the filename
-// will be calculated based on either the provided base directory (which
-// might be the path of a template which includes another template) or
-// the current working directory.
-func (fs *LocalFilesystemLoader) Abs(base, name string) string {
- if filepath.IsAbs(name) {
- return name
- }
-
- // Our own base dir always has priority; if there's none
- // we use the path provided in base.
- var err error
- if fs.baseDir == "" {
- if base == "" {
- base, err = os.Getwd()
- if err != nil {
- panic(err)
- }
- return filepath.Join(base, name)
- }
-
- return filepath.Join(filepath.Dir(base), name)
- }
-
- return filepath.Join(fs.baseDir, name)
-}
-
-// SandboxedFilesystemLoader is still WIP.
-type SandboxedFilesystemLoader struct {
- *LocalFilesystemLoader
-}
-
-// NewSandboxedFilesystemLoader creates a new sandboxed local file system instance.
-func NewSandboxedFilesystemLoader(baseDir string) (*SandboxedFilesystemLoader, error) {
- fs, err := NewLocalFileSystemLoader(baseDir)
- if err != nil {
- return nil, err
- }
- return &SandboxedFilesystemLoader{
- LocalFilesystemLoader: fs,
- }, nil
-}
-
-// Move sandbox to a virtual fs
-
-/*
-if len(set.SandboxDirectories) > 0 {
- defer func() {
- // Remove any ".." or other crap
- resolvedPath = filepath.Clean(resolvedPath)
-
- // Make the path absolute
- absPath, err := filepath.Abs(resolvedPath)
- if err != nil {
- panic(err)
- }
- resolvedPath = absPath
-
- // Check against the sandbox directories (once one pattern matches, we're done and can allow it)
- for _, pattern := range set.SandboxDirectories {
- matched, err := filepath.Match(pattern, resolvedPath)
- if err != nil {
- panic("Wrong sandbox directory match pattern (see http://golang.org/pkg/path/filepath/#Match).")
- }
- if matched {
- // OK!
- return
- }
- }
-
- // No pattern matched, we have to log+deny the request
- set.logf("Access attempt outside of the sandbox directories (blocked): '%s'", resolvedPath)
- resolvedPath = ""
- }()
-}
-*/
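Illustrative loader setup (the directory and file names are hypothetical): with a base directory set, relative template paths in FromFile and in inclusions resolve under that directory via LocalFilesystemLoader.Abs.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	loader, err := pongo2.NewLocalFileSystemLoader("templates")
	if err != nil {
		panic(err)
	}
	set := pongo2.NewSet("web", loader)

	// Resolved as templates/index.html by the loader.
	tpl, err := set.FromFile("index.html")
	if err != nil {
		panic(err)
	}
	out, _ := tpl.Execute(nil)
	fmt.Println(out)
}
```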
diff --git a/vendor/github.com/flosch/pongo2/template_sets.go b/vendor/github.com/flosch/pongo2/template_sets.go
deleted file mode 100644
index 78b3c8d010bb..000000000000
--- a/vendor/github.com/flosch/pongo2/template_sets.go
+++ /dev/null
@@ -1,305 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "sync"
-
- "github.com/juju/errors"
-)
-
-// TemplateLoader allows you to implement a virtual file system.
-type TemplateLoader interface {
- // Abs calculates the path to a given template. Whenever a path must be resolved
- // due to an import from another template, the base equals the parent template's path.
- Abs(base, name string) string
-
- // Get returns an io.Reader where the template's content can be read from.
- Get(path string) (io.Reader, error)
-}
-
-// TemplateSet allows you to create your own group of templates with their own
-// global context (which is shared among all members of the set) and their own
-// configuration.
-// It's useful for separating different kinds of templates
-// (e. g. web templates vs. mail templates).
-type TemplateSet struct {
- name string
- loaders []TemplateLoader
-
- // Globals will be provided to all templates created within this template set
- Globals Context
-
- // If debug is true (default false), ExecutionContext.Logf() will work and output
- // to STDOUT. Furthermore, FromCache() won't cache the templates.
- // Make sure to synchronize the access to it in case you're changing this
- // variable during program execution (and template compilation/execution).
- Debug bool
-
- // Options allow you to change the behavior of template-engine.
- // You can change the options before calling the Execute method.
- Options *Options
-
- // Sandbox features
- // - Disallow access to specific tags and/or filters (using BanTag() and BanFilter())
- //
- // For efficiency reasons you can ban tags/filters only *before* you have
- // added your first template to the set (restrictions are statically checked).
-// After you've added one, it's no longer possible (for your own security).
- firstTemplateCreated bool
- bannedTags map[string]bool
- bannedFilters map[string]bool
-
- // Template cache (for FromCache())
- templateCache map[string]*Template
- templateCacheMutex sync.Mutex
-}
-
-// NewSet can be used to create sets with different kinds of templates
-// (e. g. to separate web from mail templates), with different globals or
-// other configurations.
-func NewSet(name string, loaders ...TemplateLoader) *TemplateSet {
- if len(loaders) == 0 {
- panic(fmt.Errorf("at least one template loader must be specified"))
- }
-
- return &TemplateSet{
- name: name,
- loaders: loaders,
- Globals: make(Context),
- bannedTags: make(map[string]bool),
- bannedFilters: make(map[string]bool),
- templateCache: make(map[string]*Template),
- Options: newOptions(),
- }
-}
-
-func (set *TemplateSet) AddLoader(loaders ...TemplateLoader) {
- set.loaders = append(set.loaders, loaders...)
-}
-
-func (set *TemplateSet) resolveFilename(tpl *Template, path string) string {
- return set.resolveFilenameForLoader(set.loaders[0], tpl, path)
-}
-
-func (set *TemplateSet) resolveFilenameForLoader(loader TemplateLoader, tpl *Template, path string) string {
- name := ""
- if tpl != nil && tpl.isTplString {
- return path
- }
- if tpl != nil {
- name = tpl.name
- }
-
- return loader.Abs(name, path)
-}
-
-// BanTag bans a specific tag for this template set. See more in the documentation for TemplateSet.
-func (set *TemplateSet) BanTag(name string) error {
- _, has := tags[name]
- if !has {
- return errors.Errorf("tag '%s' not found", name)
- }
- if set.firstTemplateCreated {
- return errors.New("you cannot ban any tags after you've added your first template to your template set")
- }
- _, has = set.bannedTags[name]
- if has {
- return errors.Errorf("tag '%s' is already banned", name)
- }
- set.bannedTags[name] = true
-
- return nil
-}
-
-// BanFilter bans a specific filter for this template set. See more in the documentation for TemplateSet.
-func (set *TemplateSet) BanFilter(name string) error {
- _, has := filters[name]
- if !has {
- return errors.Errorf("filter '%s' not found", name)
- }
- if set.firstTemplateCreated {
- return errors.New("you cannot ban any filters after you've added your first template to your template set")
- }
- _, has = set.bannedFilters[name]
- if has {
- return errors.Errorf("filter '%s' is already banned", name)
- }
- set.bannedFilters[name] = true
-
- return nil
-}
-
-func (set *TemplateSet) resolveTemplate(tpl *Template, path string) (name string, loader TemplateLoader, fd io.Reader, err error) {
- // iterate over loaders until we appear to have a valid template
- for _, loader = range set.loaders {
- name = set.resolveFilenameForLoader(loader, tpl, path)
- fd, err = loader.Get(name)
- if err == nil {
- return
- }
- }
-
- return path, nil, nil, fmt.Errorf("unable to resolve template")
-}
-
-// CleanCache cleans the template cache. If filenames is not empty,
-// it will remove the cached templates for those filenames.
-// Otherwise it will empty the whole template cache. It is thread-safe.
-func (set *TemplateSet) CleanCache(filenames ...string) {
- set.templateCacheMutex.Lock()
- defer set.templateCacheMutex.Unlock()
-
- if len(filenames) == 0 {
- set.templateCache = make(map[string]*Template, len(set.templateCache))
- }
-
- for _, filename := range filenames {
- delete(set.templateCache, set.resolveFilename(nil, filename))
- }
-}
-
-// FromCache is a convenient method to cache templates. It is thread-safe
-// and will only compile the template associated with a filename once.
-// If TemplateSet.Debug is true (for example during development phase),
-// FromCache() will not cache the template and instead recompile it on every
-// call (so that changes to a template take effect immediately).
-func (set *TemplateSet) FromCache(filename string) (*Template, error) {
- if set.Debug {
- // Recompile on any request
- return set.FromFile(filename)
- }
- // Cache the template
- cleanedFilename := set.resolveFilename(nil, filename)
-
- set.templateCacheMutex.Lock()
- defer set.templateCacheMutex.Unlock()
-
- tpl, has := set.templateCache[cleanedFilename]
-
- // Cache miss
- if !has {
- tpl, err := set.FromFile(cleanedFilename)
- if err != nil {
- return nil, err
- }
- set.templateCache[cleanedFilename] = tpl
- return tpl, nil
- }
-
- // Cache hit
- return tpl, nil
-}
-
-// FromString loads a template from string and returns a Template instance.
-func (set *TemplateSet) FromString(tpl string) (*Template, error) {
- set.firstTemplateCreated = true
-
- return newTemplateString(set, []byte(tpl))
-}
-
-// FromBytes loads a template from bytes and returns a Template instance.
-func (set *TemplateSet) FromBytes(tpl []byte) (*Template, error) {
- set.firstTemplateCreated = true
-
- return newTemplateString(set, tpl)
-}
-
-// FromFile loads a template from a filename and returns a Template instance.
-func (set *TemplateSet) FromFile(filename string) (*Template, error) {
- set.firstTemplateCreated = true
-
- _, _, fd, err := set.resolveTemplate(nil, filename)
- if err != nil {
- return nil, &Error{
- Filename: filename,
- Sender: "fromfile",
- OrigError: err,
- }
- }
- buf, err := ioutil.ReadAll(fd)
- if err != nil {
- return nil, &Error{
- Filename: filename,
- Sender: "fromfile",
- OrigError: err,
- }
- }
-
- return newTemplate(set, filename, false, buf)
-}
-
-// RenderTemplateString is a shortcut and renders a template string directly.
-func (set *TemplateSet) RenderTemplateString(s string, ctx Context) (string, error) {
- set.firstTemplateCreated = true
-
- tpl := Must(set.FromString(s))
- result, err := tpl.Execute(ctx)
- if err != nil {
- return "", err
- }
- return result, nil
-}
-
-// RenderTemplateBytes is a shortcut and renders template bytes directly.
-func (set *TemplateSet) RenderTemplateBytes(b []byte, ctx Context) (string, error) {
- set.firstTemplateCreated = true
-
- tpl := Must(set.FromBytes(b))
- result, err := tpl.Execute(ctx)
- if err != nil {
- return "", err
- }
- return result, nil
-}
-
-// RenderTemplateFile is a shortcut and renders a template file directly.
-func (set *TemplateSet) RenderTemplateFile(fn string, ctx Context) (string, error) {
- set.firstTemplateCreated = true
-
- tpl := Must(set.FromFile(fn))
- result, err := tpl.Execute(ctx)
- if err != nil {
- return "", err
- }
- return result, nil
-}
-
-func (set *TemplateSet) logf(format string, args ...interface{}) {
- if set.Debug {
- logger.Printf(fmt.Sprintf("[template set: %s] %s", set.name, format), args...)
- }
-}
-
-// Logging function (internally used)
-func logf(format string, items ...interface{}) {
- if debug {
- logger.Printf(format, items...)
- }
-}
-
-var (
- debug bool // internal debugging
- logger = log.New(os.Stdout, "[pongo2] ", log.LstdFlags|log.Lshortfile)
-
- // DefaultLoader allows the default un-sandboxed access to the local file
- // system and is used by the DefaultSet.
- DefaultLoader = MustNewLocalFileSystemLoader("")
-
- // DefaultSet is a set created for you for convenience reasons.
- DefaultSet = NewSet("default", DefaultLoader)
-
- // Methods on the default set
- FromString = DefaultSet.FromString
- FromBytes = DefaultSet.FromBytes
- FromFile = DefaultSet.FromFile
- FromCache = DefaultSet.FromCache
- RenderTemplateString = DefaultSet.RenderTemplateString
- RenderTemplateFile = DefaultSet.RenderTemplateFile
-
- // Globals for the default set
- Globals = DefaultSet.Globals
-)
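A hedged sketch of the TemplateSet API above (the template file name is hypothetical): sets carry their own globals and sandbox restrictions, BanTag must run before the first template is added, and FromCache compiles each file only once unless Debug is set.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	set := pongo2.NewSet("mail", pongo2.MustNewLocalFileSystemLoader("."))
	set.Globals["product"] = "k3s"

	// Must happen before the first template is added to the set.
	if err := set.BanTag("ssi"); err != nil {
		panic(err)
	}

	tpl, err := set.FromCache("welcome.tpl") // compiled once, then cached
	if err != nil {
		panic(err)
	}
	out, _ := tpl.Execute(pongo2.Context{"user": "admin"})
	fmt.Println(out)
}
```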
diff --git a/vendor/github.com/flosch/pongo2/value.go b/vendor/github.com/flosch/pongo2/value.go
deleted file mode 100644
index df70bbc80cfd..000000000000
--- a/vendor/github.com/flosch/pongo2/value.go
+++ /dev/null
@@ -1,520 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "reflect"
- "sort"
- "strconv"
- "strings"
-)
-
-type Value struct {
- val reflect.Value
- safe bool // used to indicate whether a Value needs explicit escaping in the template
-}
-
-// AsValue converts any given value to a pongo2.Value
-// Usually being used within own functions passed to a template
-// through a Context or within filter functions.
-//
-// Example:
-// AsValue("my string")
-func AsValue(i interface{}) *Value {
- return &Value{
- val: reflect.ValueOf(i),
- }
-}
-
-// AsSafeValue works like AsValue, but does not apply the 'escape' filter.
-func AsSafeValue(i interface{}) *Value {
- return &Value{
- val: reflect.ValueOf(i),
- safe: true,
- }
-}
-
-func (v *Value) getResolvedValue() reflect.Value {
- if v.val.IsValid() && v.val.Kind() == reflect.Ptr {
- return v.val.Elem()
- }
- return v.val
-}
-
-// IsString checks whether the underlying value is a string
-func (v *Value) IsString() bool {
- return v.getResolvedValue().Kind() == reflect.String
-}
-
-// IsBool checks whether the underlying value is a bool
-func (v *Value) IsBool() bool {
- return v.getResolvedValue().Kind() == reflect.Bool
-}
-
-// IsFloat checks whether the underlying value is a float
-func (v *Value) IsFloat() bool {
- return v.getResolvedValue().Kind() == reflect.Float32 ||
- v.getResolvedValue().Kind() == reflect.Float64
-}
-
-// IsInteger checks whether the underlying value is an integer
-func (v *Value) IsInteger() bool {
- return v.getResolvedValue().Kind() == reflect.Int ||
- v.getResolvedValue().Kind() == reflect.Int8 ||
- v.getResolvedValue().Kind() == reflect.Int16 ||
- v.getResolvedValue().Kind() == reflect.Int32 ||
- v.getResolvedValue().Kind() == reflect.Int64 ||
- v.getResolvedValue().Kind() == reflect.Uint ||
- v.getResolvedValue().Kind() == reflect.Uint8 ||
- v.getResolvedValue().Kind() == reflect.Uint16 ||
- v.getResolvedValue().Kind() == reflect.Uint32 ||
- v.getResolvedValue().Kind() == reflect.Uint64
-}
-
-// IsNumber checks whether the underlying value is either an integer
-// or a float.
-func (v *Value) IsNumber() bool {
- return v.IsInteger() || v.IsFloat()
-}
-
-// IsNil checks whether the underlying value is NIL
-func (v *Value) IsNil() bool {
- //fmt.Printf("%+v\n", v.getResolvedValue().Type().String())
- return !v.getResolvedValue().IsValid()
-}
-
-// String returns a string for the underlying value. If this value is not
-// of type string, pongo2 tries to convert it. Currently the following
-// types for underlying values are supported:
-//
-// 1. string
-// 2. int/uint (any size)
-// 3. float (any precision)
-// 4. bool
-// 5. time.Time
-// 6. String() will be called on the underlying value if provided
-//
-// NIL values will lead to an empty string. Unsupported types lead
-// to their respective type name.
-func (v *Value) String() string {
- if v.IsNil() {
- return ""
- }
-
- switch v.getResolvedValue().Kind() {
- case reflect.String:
- return v.getResolvedValue().String()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return strconv.FormatInt(v.getResolvedValue().Int(), 10)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return strconv.FormatUint(v.getResolvedValue().Uint(), 10)
- case reflect.Float32, reflect.Float64:
- return fmt.Sprintf("%f", v.getResolvedValue().Float())
- case reflect.Bool:
- if v.Bool() {
- return "True"
- }
- return "False"
- case reflect.Struct:
- if t, ok := v.Interface().(fmt.Stringer); ok {
- return t.String()
- }
- }
-
- logf("Value.String() not implemented for type: %s\n", v.getResolvedValue().Kind().String())
- return v.getResolvedValue().String()
-}
-
-// Integer returns the underlying value as an integer (converts the underlying
-// value, if necessary). If it's not possible to convert the underlying value,
-// it will return 0.
-func (v *Value) Integer() int {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return int(v.getResolvedValue().Int())
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return int(v.getResolvedValue().Uint())
- case reflect.Float32, reflect.Float64:
- return int(v.getResolvedValue().Float())
- case reflect.String:
-  // Try to convert from string to int (parsing as float64 first)
- f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
- if err != nil {
- return 0
- }
- return int(f)
- default:
- logf("Value.Integer() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return 0
- }
-}
-
-// Float returns the underlying value as a float (converts the underlying
-// value, if necessary). If it's not possible to convert the underlying value,
-// it will return 0.0.
-func (v *Value) Float() float64 {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return float64(v.getResolvedValue().Int())
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return float64(v.getResolvedValue().Uint())
- case reflect.Float32, reflect.Float64:
- return v.getResolvedValue().Float()
- case reflect.String:
- // Try to convert from string to float64 (base 10)
- f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
- if err != nil {
- return 0.0
- }
- return f
- default:
- logf("Value.Float() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return 0.0
- }
-}
-
-// Bool returns the underlying value as bool. If the value is not bool, false
-// will always be returned. If you're looking for true/false-evaluation of the
-// underlying value, have a look at the IsTrue() function.
-func (v *Value) Bool() bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Bool:
- return v.getResolvedValue().Bool()
- default:
- logf("Value.Bool() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return false
- }
-}
-
-// IsTrue tries to evaluate the underlying value the Pythonic way:
-//
-// Returns TRUE in one the following cases:
-//
-// * int != 0
-// * uint != 0
-// * float != 0.0
-// * len(array/chan/map/slice/string) > 0
-// * bool == true
-// * underlying value is a struct
-//
-// Otherwise it always returns FALSE.
-func (v *Value) IsTrue() bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.getResolvedValue().Int() != 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return v.getResolvedValue().Uint() != 0
- case reflect.Float32, reflect.Float64:
- return v.getResolvedValue().Float() != 0
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return v.getResolvedValue().Len() > 0
- case reflect.Bool:
- return v.getResolvedValue().Bool()
- case reflect.Struct:
- return true // struct instance is always true
- default:
- logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return false
- }
-}
-
-// Negate tries to negate the underlying value. It's mainly used for
-// the NOT-operator and in conjunction with a call to
-// return_value.IsTrue() afterwards.
-//
-// Example:
-// AsValue(1).Negate().IsTrue() == false
-func (v *Value) Negate() *Value {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- if v.Integer() != 0 {
- return AsValue(0)
- }
- return AsValue(1)
- case reflect.Float32, reflect.Float64:
- if v.Float() != 0.0 {
- return AsValue(float64(0.0))
- }
-  return AsValue(float64(1.0))
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return AsValue(v.getResolvedValue().Len() == 0)
- case reflect.Bool:
- return AsValue(!v.getResolvedValue().Bool())
- case reflect.Struct:
- return AsValue(false)
- default:
- logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return AsValue(true)
- }
-}
-
-// Len returns the length for an array, chan, map, slice or string.
-// Otherwise it will return 0.
-func (v *Value) Len() int {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
- return v.getResolvedValue().Len()
- case reflect.String:
- runes := []rune(v.getResolvedValue().String())
- return len(runes)
- default:
- logf("Value.Len() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return 0
- }
-}
-
-// Slice slices an array, slice or string. Otherwise it will
-// return an empty []int.
-func (v *Value) Slice(i, j int) *Value {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Slice:
- return AsValue(v.getResolvedValue().Slice(i, j).Interface())
- case reflect.String:
- runes := []rune(v.getResolvedValue().String())
- return AsValue(string(runes[i:j]))
- default:
- logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return AsValue([]int{})
- }
-}
-
-// Index gets the i-th item of an array, slice or string. Otherwise
-// it will return NIL.
-func (v *Value) Index(i int) *Value {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Slice:
- if i >= v.Len() {
- return AsValue(nil)
- }
- return AsValue(v.getResolvedValue().Index(i).Interface())
- case reflect.String:
- //return AsValue(v.getResolvedValue().Slice(i, i+1).Interface())
- s := v.getResolvedValue().String()
- runes := []rune(s)
- if i < len(runes) {
- return AsValue(string(runes[i]))
- }
- return AsValue("")
- default:
- logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return AsValue([]int{})
- }
-}
-
-// Contains checks whether the underlying value (which must be of type struct, map,
-// string, array or slice) contains another Value (e. g. used to check
-// whether a struct contains a specific field or a map contains a specific key).
-//
-// Example:
-// AsValue("Hello, World!").Contains(AsValue("World")) == true
-func (v *Value) Contains(other *Value) bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Struct:
- fieldValue := v.getResolvedValue().FieldByName(other.String())
- return fieldValue.IsValid()
- case reflect.Map:
- var mapValue reflect.Value
- switch other.Interface().(type) {
- case int:
- mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
- case string:
- mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
- default:
- logf("Value.Contains() does not support lookup type '%s'\n", other.getResolvedValue().Kind().String())
- return false
- }
-
- return mapValue.IsValid()
- case reflect.String:
- return strings.Contains(v.getResolvedValue().String(), other.String())
-
- case reflect.Slice, reflect.Array:
- for i := 0; i < v.getResolvedValue().Len(); i++ {
- item := v.getResolvedValue().Index(i)
- if other.Interface() == item.Interface() {
- return true
- }
- }
- return false
-
- default:
- logf("Value.Contains() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return false
- }
-}
-
-// CanSlice checks whether the underlying value is of type array, slice or string.
-// You normally would use CanSlice() before using the Slice() operation.
-func (v *Value) CanSlice() bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Slice, reflect.String:
- return true
- }
- return false
-}
-
-// Iterate iterates over a map, array, slice or a string. For every item it
-// calls fn (the function's first argument) with the following arguments:
-//
-// idx current 0-index
-// count total number of items
-// key *Value for the key or item
-// value *Value (only for maps, the respective value for a specific key)
-//
-// If the underlying value has no items or is not one of the types above,
-// the empty function (function's second argument) will be called.
-func (v *Value) Iterate(fn func(idx, count int, key, value *Value) bool, empty func()) {
- v.IterateOrder(fn, empty, false, false)
-}
-
-// IterateOrder behaves like Value.Iterate, but can iterate through an array/slice/string in reverse. Does
-// not affect the iteration through a map because maps don't have any particular order.
-// However, you can force an order using the `sorted` keyword (and even use `reversed sorted`).
-func (v *Value) IterateOrder(fn func(idx, count int, key, value *Value) bool, empty func(), reverse bool, sorted bool) {
- switch v.getResolvedValue().Kind() {
- case reflect.Map:
- keys := sortedKeys(v.getResolvedValue().MapKeys())
- if sorted {
- if reverse {
- sort.Sort(sort.Reverse(keys))
- } else {
- sort.Sort(keys)
- }
- }
- keyLen := len(keys)
- for idx, key := range keys {
- value := v.getResolvedValue().MapIndex(key)
- if !fn(idx, keyLen, &Value{val: key}, &Value{val: value}) {
- return
- }
- }
- if keyLen == 0 {
- empty()
- }
- return // done
- case reflect.Array, reflect.Slice:
- var items valuesList
-
- itemCount := v.getResolvedValue().Len()
- for i := 0; i < itemCount; i++ {
- items = append(items, &Value{val: v.getResolvedValue().Index(i)})
- }
-
- if sorted {
- if reverse {
- sort.Sort(sort.Reverse(items))
- } else {
- sort.Sort(items)
- }
- } else {
- if reverse {
- for i := 0; i < itemCount/2; i++ {
- items[i], items[itemCount-1-i] = items[itemCount-1-i], items[i]
- }
- }
- }
-
- if len(items) > 0 {
- for idx, item := range items {
- if !fn(idx, itemCount, item, nil) {
- return
- }
- }
- } else {
- empty()
- }
- return // done
- case reflect.String:
- if sorted {
- // TODO(flosch): Handle sorted
- panic("TODO: handle sort for type string")
- }
-
- // TODO(flosch): Not utf8-compatible (utf8-decoding necessary)
- charCount := v.getResolvedValue().Len()
- if charCount > 0 {
- if reverse {
- for i := charCount - 1; i >= 0; i-- {
- if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
- return
- }
- }
- } else {
- for i := 0; i < charCount; i++ {
- if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
- return
- }
- }
- }
- } else {
- empty()
- }
- return // done
- default:
- logf("Value.Iterate() not available for type: %s\n", v.getResolvedValue().Kind().String())
- }
- empty()
-}
-
-// Interface gives you access to the underlying value.
-func (v *Value) Interface() interface{} {
- if v.val.IsValid() {
- return v.val.Interface()
- }
- return nil
-}
-
-// EqualValueTo checks whether two values contain the same value or object.
-func (v *Value) EqualValueTo(other *Value) bool {
- // comparison of uint with int fails using .Interface()-comparison (see issue #64)
- if v.IsInteger() && other.IsInteger() {
- return v.Integer() == other.Integer()
- }
- return v.Interface() == other.Interface()
-}
-
-type sortedKeys []reflect.Value
-
-func (sk sortedKeys) Len() int {
- return len(sk)
-}
-
-func (sk sortedKeys) Less(i, j int) bool {
- vi := &Value{val: sk[i]}
- vj := &Value{val: sk[j]}
- switch {
- case vi.IsInteger() && vj.IsInteger():
- return vi.Integer() < vj.Integer()
- case vi.IsFloat() && vj.IsFloat():
- return vi.Float() < vj.Float()
- default:
- return vi.String() < vj.String()
- }
-}
-
-func (sk sortedKeys) Swap(i, j int) {
- sk[i], sk[j] = sk[j], sk[i]
-}
-
-type valuesList []*Value
-
-func (vl valuesList) Len() int {
- return len(vl)
-}
-
-func (vl valuesList) Less(i, j int) bool {
- vi := vl[i]
- vj := vl[j]
- switch {
- case vi.IsInteger() && vj.IsInteger():
- return vi.Integer() < vj.Integer()
- case vi.IsFloat() && vj.IsFloat():
- return vi.Float() < vj.Float()
- default:
- return vi.String() < vj.String()
- }
-}
-
-func (vl valuesList) Swap(i, j int) {
- vl[i], vl[j] = vl[j], vl[i]
-}
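Illustrative use of the Value API above (not part of the diff): AsValue wraps arbitrary Go values for filters and template functions, IsTrue applies the Pythonic truthiness rules, and Iterate walks slices, maps and strings.

```go
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	v := pongo2.AsValue([]string{"a", "b", "c"})
	fmt.Println(v.IsTrue(), v.Len())             // true 3
	fmt.Println(v.Contains(pongo2.AsValue("b"))) // true

	// For slices, the third argument carries the item; the fourth is nil.
	v.Iterate(func(idx, count int, item, _ *pongo2.Value) bool {
		fmt.Println(idx, item.String())
		return true // keep iterating
	}, func() { fmt.Println("empty") })
}
```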
diff --git a/vendor/github.com/flosch/pongo2/variable.go b/vendor/github.com/flosch/pongo2/variable.go
deleted file mode 100644
index a506e376e428..000000000000
--- a/vendor/github.com/flosch/pongo2/variable.go
+++ /dev/null
@@ -1,695 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
-
- "github.com/juju/errors"
-)
-
-const (
- varTypeInt = iota
- varTypeIdent
-)
-
-var (
- typeOfValuePtr = reflect.TypeOf(new(Value))
- typeOfExecCtxPtr = reflect.TypeOf(new(ExecutionContext))
-)
-
-type variablePart struct {
- typ int
- s string
- i int
-
- isFunctionCall bool
- callingArgs []functionCallArgument // needed for a function call, represents all argument nodes (INode supports nested function calls)
-}
-
-type functionCallArgument interface {
- Evaluate(*ExecutionContext) (*Value, *Error)
-}
-
-// TODO: Add location tokens
-type stringResolver struct {
- locationToken *Token
- val string
-}
-
-type intResolver struct {
- locationToken *Token
- val int
-}
-
-type floatResolver struct {
- locationToken *Token
- val float64
-}
-
-type boolResolver struct {
- locationToken *Token
- val bool
-}
-
-type variableResolver struct {
- locationToken *Token
-
- parts []*variablePart
-}
-
-type nodeFilteredVariable struct {
- locationToken *Token
-
- resolver IEvaluator
- filterChain []*filterCall
-}
-
-type nodeVariable struct {
- locationToken *Token
- expr IEvaluator
-}
-
-type executionCtxEval struct{}
-
-func (v *nodeFilteredVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := v.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (vr *variableResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := vr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (s *stringResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := s.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (i *intResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := i.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (f *floatResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := f.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (b *boolResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := b.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (v *nodeFilteredVariable) GetPositionToken() *Token {
- return v.locationToken
-}
-
-func (vr *variableResolver) GetPositionToken() *Token {
- return vr.locationToken
-}
-
-func (s *stringResolver) GetPositionToken() *Token {
- return s.locationToken
-}
-
-func (i *intResolver) GetPositionToken() *Token {
- return i.locationToken
-}
-
-func (f *floatResolver) GetPositionToken() *Token {
- return f.locationToken
-}
-
-func (b *boolResolver) GetPositionToken() *Token {
- return b.locationToken
-}
-
-func (s *stringResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(s.val), nil
-}
-
-func (i *intResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(i.val), nil
-}
-
-func (f *floatResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(f.val), nil
-}
-
-func (b *boolResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(b.val), nil
-}
-
-func (s *stringResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (i *intResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (f *floatResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (b *boolResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (nv *nodeVariable) FilterApplied(name string) bool {
- return nv.expr.FilterApplied(name)
-}
-
-func (nv *nodeVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := nv.expr.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if !nv.expr.FilterApplied("safe") && !value.safe && value.IsString() && ctx.Autoescape {
- // apply escape filter
- value, err = filters["escape"](value, nil)
- if err != nil {
- return err
- }
- }
-
- writer.WriteString(value.String())
- return nil
-}
-
-func (executionCtxEval) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(ctx), nil
-}
-
-func (vr *variableResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (vr *variableResolver) String() string {
- parts := make([]string, 0, len(vr.parts))
- for _, p := range vr.parts {
- switch p.typ {
- case varTypeInt:
- parts = append(parts, strconv.Itoa(p.i))
- case varTypeIdent:
- parts = append(parts, p.s)
- default:
- panic("unimplemented")
- }
- }
- return strings.Join(parts, ".")
-}
-
-func (vr *variableResolver) resolve(ctx *ExecutionContext) (*Value, error) {
- var current reflect.Value
- var isSafe bool
-
- for idx, part := range vr.parts {
- if idx == 0 {
- // We're looking up the first part of the variable.
-			// First we look in our private context
-			// (e.g. information provided by tags, like the forloop)
- val, inPrivate := ctx.Private[vr.parts[0].s]
- if !inPrivate {
- // Nothing found? Then have a final lookup in the public context
- val = ctx.Public[vr.parts[0].s]
- }
- current = reflect.ValueOf(val) // Get the initial value
- } else {
-			// For subsequent parts, resolve from current
-
-			// Before resolving the pointer, check whether we have a method to call.
-			// The problem with resolving the pointer first is that we would change the receiver.
- isFunc := false
- if part.typ == varTypeIdent {
- funcValue := current.MethodByName(part.s)
- if funcValue.IsValid() {
- current = funcValue
- isFunc = true
- }
- }
-
- if !isFunc {
-				// If current is a pointer, resolve it
- if current.Kind() == reflect.Ptr {
- current = current.Elem()
- if !current.IsValid() {
- // Value is not valid (anymore)
- return AsValue(nil), nil
- }
- }
-
- // Look up which part must be called now
- switch part.typ {
- case varTypeInt:
- // Calling an index is only possible for:
- // * slices/arrays/strings
- switch current.Kind() {
- case reflect.String, reflect.Array, reflect.Slice:
- if part.i >= 0 && current.Len() > part.i {
- current = current.Index(part.i)
- } else {
-							// In Django, exceeding the length of a list just yields an empty value.
- return AsValue(nil), nil
- }
- default:
- return nil, errors.Errorf("Can't access an index on type %s (variable %s)",
- current.Kind().String(), vr.String())
- }
- case varTypeIdent:
- // debugging:
- // fmt.Printf("now = %s (kind: %s)\n", part.s, current.Kind().String())
-
- // Calling a field or key
- switch current.Kind() {
- case reflect.Struct:
- current = current.FieldByName(part.s)
- case reflect.Map:
- current = current.MapIndex(reflect.ValueOf(part.s))
- default:
- return nil, errors.Errorf("Can't access a field by name on type %s (variable %s)",
- current.Kind().String(), vr.String())
- }
- default:
- panic("unimplemented")
- }
- }
- }
-
- if !current.IsValid() {
- // Value is not valid (anymore)
- return AsValue(nil), nil
- }
-
- // If current is a reflect.ValueOf(pongo2.Value), then unpack it
- // Happens in function calls (as a return value) or by injecting
- // into the execution context (e.g. in a for-loop)
- if current.Type() == typeOfValuePtr {
- tmpValue := current.Interface().(*Value)
- current = tmpValue.val
- isSafe = tmpValue.safe
- }
-
- // Check whether this is an interface and resolve it where required
- if current.Kind() == reflect.Interface {
- current = reflect.ValueOf(current.Interface())
- }
-
- // Check if the part is a function call
- if part.isFunctionCall || current.Kind() == reflect.Func {
- // Check for callable
- if current.Kind() != reflect.Func {
- return nil, errors.Errorf("'%s' is not a function (it is %s)", vr.String(), current.Kind().String())
- }
-
- // Check for correct function syntax and types
- // func(*Value, ...) *Value
- t := current.Type()
- currArgs := part.callingArgs
-
- // If an implicit ExecCtx is needed
- if t.NumIn() > 0 && t.In(0) == typeOfExecCtxPtr {
- currArgs = append([]functionCallArgument{executionCtxEval{}}, currArgs...)
- }
-
- // Input arguments
- if len(currArgs) != t.NumIn() && !(len(currArgs) >= t.NumIn()-1 && t.IsVariadic()) {
- return nil,
- errors.Errorf("Function input argument count (%d) of '%s' must be equal to the calling argument count (%d).",
- t.NumIn(), vr.String(), len(currArgs))
- }
-
- // Output arguments
- if t.NumOut() != 1 && t.NumOut() != 2 {
- return nil, errors.Errorf("'%s' must have exactly 1 or 2 output arguments, the second argument must be of type error", vr.String())
- }
-
- // Evaluate all parameters
- var parameters []reflect.Value
-
- numArgs := t.NumIn()
- isVariadic := t.IsVariadic()
- var fnArg reflect.Type
-
- for idx, arg := range currArgs {
- pv, err := arg.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
-
- if isVariadic {
- if idx >= t.NumIn()-1 {
- fnArg = t.In(numArgs - 1).Elem()
- } else {
- fnArg = t.In(idx)
- }
- } else {
- fnArg = t.In(idx)
- }
-
- if fnArg != typeOfValuePtr {
-					// The function's argument is not a *pongo2.Value, so we have to check whether the input argument has the same type as the function's argument
- if !isVariadic {
- if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
- return nil, errors.Errorf("Function input argument %d of '%s' must be of type %s or *pongo2.Value (not %T).",
- idx, vr.String(), fnArg.String(), pv.Interface())
- }
-						// The function's argument has another type, so use the interface value
- parameters = append(parameters, reflect.ValueOf(pv.Interface()))
- } else {
- if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
- return nil, errors.Errorf("Function variadic input argument of '%s' must be of type %s or *pongo2.Value (not %T).",
- vr.String(), fnArg.String(), pv.Interface())
- }
-						// The function's argument has another type, so use the interface value
- parameters = append(parameters, reflect.ValueOf(pv.Interface()))
- }
- } else {
- // Function's argument is a *pongo2.Value
- parameters = append(parameters, reflect.ValueOf(pv))
- }
- }
-
- // Check if any of the values are invalid
- for _, p := range parameters {
- if p.Kind() == reflect.Invalid {
- return nil, errors.Errorf("Calling a function using an invalid parameter")
- }
- }
-
- // Call it and get first return parameter back
- values := current.Call(parameters)
- rv := values[0]
- if t.NumOut() == 2 {
- e := values[1].Interface()
- if e != nil {
- err, ok := e.(error)
- if !ok {
- return nil, errors.Errorf("The second return value is not an error")
- }
- if err != nil {
- return nil, err
- }
- }
- }
-
- if rv.Type() != typeOfValuePtr {
- current = reflect.ValueOf(rv.Interface())
- } else {
- // Return the function call value
- current = rv.Interface().(*Value).val
- isSafe = rv.Interface().(*Value).safe
- }
- }
-
- if !current.IsValid() {
- // Value is not valid (e. g. NIL value)
- return AsValue(nil), nil
- }
- }
-
- return &Value{val: current, safe: isSafe}, nil
-}
-
-func (vr *variableResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- value, err := vr.resolve(ctx)
- if err != nil {
- return AsValue(nil), ctx.Error(err.Error(), vr.locationToken)
- }
- return value, nil
-}
-
-func (v *nodeFilteredVariable) FilterApplied(name string) bool {
- for _, filter := range v.filterChain {
- if filter.name == name {
- return true
- }
- }
- return false
-}
-
-func (v *nodeFilteredVariable) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- value, err := v.resolver.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
-
- for _, filter := range v.filterChain {
- value, err = filter.Execute(value, ctx)
- if err != nil {
- return nil, err
- }
- }
-
- return value, nil
-}
-
-// IDENT | IDENT.(IDENT|NUMBER)...
-func (p *Parser) parseVariableOrLiteral() (IEvaluator, *Error) {
- t := p.Current()
-
- if t == nil {
- return nil, p.Error("Unexpected EOF, expected a number, string, keyword or identifier.", p.lastToken)
- }
-
-	// If the first part is a number or a string, there's nothing to resolve; we just return the value
- switch t.Typ {
- case TokenNumber:
- p.Consume()
-
-		// One exception to the rule that we don't have float64 literals is at the beginning
-		// of an expression (or a variable name). Since we know we started with an integer,
-		// which obviously can't be a variable name, we can check whether the first number
-		// is followed by a dot (and then a number again). If so, we convert it to a float64.
-
- if p.Match(TokenSymbol, ".") != nil {
- // float64
- t2 := p.MatchType(TokenNumber)
- if t2 == nil {
- return nil, p.Error("Expected a number after the '.'.", nil)
- }
- f, err := strconv.ParseFloat(fmt.Sprintf("%s.%s", t.Val, t2.Val), 64)
- if err != nil {
- return nil, p.Error(err.Error(), t)
- }
- fr := &floatResolver{
- locationToken: t,
- val: f,
- }
- return fr, nil
- }
- i, err := strconv.Atoi(t.Val)
- if err != nil {
- return nil, p.Error(err.Error(), t)
- }
- nr := &intResolver{
- locationToken: t,
- val: i,
- }
- return nr, nil
-
- case TokenString:
- p.Consume()
- sr := &stringResolver{
- locationToken: t,
- val: t.Val,
- }
- return sr, nil
- case TokenKeyword:
- p.Consume()
- switch t.Val {
- case "true":
- br := &boolResolver{
- locationToken: t,
- val: true,
- }
- return br, nil
- case "false":
- br := &boolResolver{
- locationToken: t,
- val: false,
- }
- return br, nil
- default:
- return nil, p.Error("This keyword is not allowed here.", nil)
- }
- }
-
- resolver := &variableResolver{
- locationToken: t,
- }
-
- // First part of a variable MUST be an identifier
- if t.Typ != TokenIdentifier {
- return nil, p.Error("Expected either a number, string, keyword or identifier.", t)
- }
-
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeIdent,
- s: t.Val,
- })
-
- p.Consume() // we consumed the first identifier of the variable name
-
-variableLoop:
- for p.Remaining() > 0 {
- t = p.Current()
-
- if p.Match(TokenSymbol, ".") != nil {
- // Next variable part (can be either NUMBER or IDENT)
- t2 := p.Current()
- if t2 != nil {
- switch t2.Typ {
- case TokenIdentifier:
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeIdent,
- s: t2.Val,
- })
- p.Consume() // consume: IDENT
- continue variableLoop
- case TokenNumber:
- i, err := strconv.Atoi(t2.Val)
- if err != nil {
- return nil, p.Error(err.Error(), t2)
- }
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeInt,
- i: i,
- })
- p.Consume() // consume: NUMBER
- continue variableLoop
- default:
- return nil, p.Error("This token is not allowed within a variable name.", t2)
- }
- } else {
- // EOF
- return nil, p.Error("Unexpected EOF, expected either IDENTIFIER or NUMBER after DOT.",
- p.lastToken)
- }
- } else if p.Match(TokenSymbol, "(") != nil {
- // Function call
- // FunctionName '(' Comma-separated list of expressions ')'
- part := resolver.parts[len(resolver.parts)-1]
- part.isFunctionCall = true
- argumentLoop:
- for {
- if p.Remaining() == 0 {
- return nil, p.Error("Unexpected EOF, expected function call argument list.", p.lastToken)
- }
-
- if p.Peek(TokenSymbol, ")") == nil {
- // No closing bracket, so we're parsing an expression
- exprArg, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- part.callingArgs = append(part.callingArgs, exprArg)
-
- if p.Match(TokenSymbol, ")") != nil {
- // If there's a closing bracket after an expression, we will stop parsing the arguments
- break argumentLoop
- } else {
-							// If there's NO closing bracket, there MUST be a comma
- if p.Match(TokenSymbol, ",") == nil {
- return nil, p.Error("Missing comma or closing bracket after argument.", nil)
- }
- }
- } else {
- // We got a closing bracket, so stop parsing arguments
- p.Consume()
- break argumentLoop
- }
-
- }
- // We're done parsing the function call, next variable part
- continue variableLoop
- }
-
- // No dot or function call? Then we're done with the variable parsing
- break
- }
-
- return resolver, nil
-}
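
For reference, a minimal sketch of what this grammar accepts in practice, written against pongo2's public API (the context values below are hypothetical): each dotted part of "user.Name" becomes an ident variablePart, and the numeric part of "user.Tags.0" becomes an int part that indexes the slice.

package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	// Parses into parts: [ident "user", ident "Name"] and
	// [ident "user", ident "Tags", int 0].
	tpl, err := pongo2.FromString("{{ user.Name }}: {{ user.Tags.0 }}")
	if err != nil {
		panic(err)
	}
	out, err := tpl.Execute(pongo2.Context{
		"user": map[string]interface{}{
			"Name": "alice",
			"Tags": []string{"admin"},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // alice: admin
}
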
-
-func (p *Parser) parseVariableOrLiteralWithFilter() (*nodeFilteredVariable, *Error) {
- v := &nodeFilteredVariable{
- locationToken: p.Current(),
- }
-
- // Parse the variable name
- resolver, err := p.parseVariableOrLiteral()
- if err != nil {
- return nil, err
- }
- v.resolver = resolver
-
- // Parse all the filters
-filterLoop:
- for p.Match(TokenSymbol, "|") != nil {
- // Parse one single filter
- filter, err := p.parseFilter()
- if err != nil {
- return nil, err
- }
-
- // Check sandbox filter restriction
- if _, isBanned := p.template.set.bannedFilters[filter.name]; isBanned {
- return nil, p.Error(fmt.Sprintf("Usage of filter '%s' is not allowed (sandbox restriction active).", filter.name), nil)
- }
-
- v.filterChain = append(v.filterChain, filter)
-
- continue filterLoop
- }
-
- return v, nil
-}
-
-func (p *Parser) parseVariableElement() (INode, *Error) {
- node := &nodeVariable{
- locationToken: p.Current(),
- }
-
- p.Consume() // consume '{{'
-
- expr, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- node.expr = expr
-
- if p.Match(TokenSymbol, "}}") == nil {
- return nil, p.Error("'}}' expected", nil)
- }
-
- return node, nil
-}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
new file mode 100644
index 000000000000..6f4a902b5be5
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
@@ -0,0 +1,2806 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+ The code generator for the plugin for the Google protocol buffer compiler.
+ It generates Go code from the protocol buffer description files read by the
+ main routine.
+*/
+package generator
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "log"
+ "os"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap"
+
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
+)
+
+// generatedCodeVersion indicates a version of the generated code.
+// It is incremented whenever an incompatibility between the generated code and
+// proto package is introduced; the generated code references
+// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion).
+const generatedCodeVersion = 3
+
+// A Plugin provides functionality to add to the output during Go code generation,
+// such as to produce RPC stubs.
+type Plugin interface {
+ // Name identifies the plugin.
+ Name() string
+ // Init is called once after data structures are built but before
+ // code generation begins.
+ Init(g *Generator)
+ // Generate produces the code generated by the plugin for this file,
+ // except for the imports, by calling the generator's methods P, In, and Out.
+ Generate(file *FileDescriptor)
+ // GenerateImports produces the import declarations for this file.
+ // It is called after Generate.
+ GenerateImports(file *FileDescriptor)
+}
+
+var plugins []Plugin
+
+// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated.
+// It is typically called during initialization.
+func RegisterPlugin(p Plugin) {
+ plugins = append(plugins, p)
+}
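
As a sketch of the hook this provides, a hypothetical second-order plugin can register itself from an init function and then write into the output through the generator; this mirrors how the built-in grpc plugin attaches.

package myplugin

import "github.com/golang/protobuf/protoc-gen-go/generator"

type marker struct {
	gen *generator.Generator
}

func (m *marker) Name() string { return "marker" }

func (m *marker) Init(g *generator.Generator) { m.gen = g }

func (m *marker) Generate(file *generator.FileDescriptor) {
	// P writes one line into the current output buffer.
	m.gen.P("// generated with the marker plugin")
}

func (m *marker) GenerateImports(file *generator.FileDescriptor) {}

func init() {
	// Register during initialization so the plugin runs when enabled
	// via the "plugins=" command-line parameter.
	generator.RegisterPlugin(&marker{})
}
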
+
+// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf".
+type GoImportPath string
+
+func (p GoImportPath) String() string { return strconv.Quote(string(p)) }
+
+// A GoPackageName is the name of a Go package. e.g., "protobuf".
+type GoPackageName string
+
+// Each type we import as a protocol buffer (other than FileDescriptorProto) needs
+// a pointer to the FileDescriptorProto that represents it. These types achieve that
+// wrapping by placing each Proto inside a struct with the pointer to its File. The
+// structs have the same names as their contents, with "Proto" removed.
+// FileDescriptor is used to store the things that it points to.
+
+// The file and package name methods are common to messages and enums.
+type common struct {
+ file *FileDescriptor // File this object comes from.
+}
+
+// GoImportPath is the import path of the Go package containing the type.
+func (c *common) GoImportPath() GoImportPath {
+ return c.file.importPath
+}
+
+func (c *common) File() *FileDescriptor { return c.file }
+
+func fileIsProto3(file *descriptor.FileDescriptorProto) bool {
+ return file.GetSyntax() == "proto3"
+}
+
+func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) }
+
+// Descriptor represents a protocol buffer message.
+type Descriptor struct {
+ common
+ *descriptor.DescriptorProto
+ parent *Descriptor // The containing message, if any.
+ nested []*Descriptor // Inner messages, if any.
+ enums []*EnumDescriptor // Inner enums, if any.
+ ext []*ExtensionDescriptor // Extensions, if any.
+ typename []string // Cached typename vector.
+ index int // The index into the container, whether the file or another message.
+ path string // The SourceCodeInfo path as comma-separated integers.
+ group bool
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (d *Descriptor) TypeName() []string {
+ if d.typename != nil {
+ return d.typename
+ }
+ n := 0
+ for parent := d; parent != nil; parent = parent.parent {
+ n++
+ }
+ s := make([]string, n)
+ for parent := d; parent != nil; parent = parent.parent {
+ n--
+ s[n] = parent.GetName()
+ }
+ d.typename = s
+ return s
+}
+
+// EnumDescriptor describes an enum. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type EnumDescriptor struct {
+ common
+ *descriptor.EnumDescriptorProto
+ parent *Descriptor // The containing message, if any.
+ typename []string // Cached typename vector.
+ index int // The index into the container, whether the file or a message.
+ path string // The SourceCodeInfo path as comma-separated integers.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *EnumDescriptor) TypeName() (s []string) {
+ if e.typename != nil {
+ return e.typename
+ }
+ name := e.GetName()
+ if e.parent == nil {
+ s = make([]string, 1)
+ } else {
+ pname := e.parent.TypeName()
+ s = make([]string, len(pname)+1)
+ copy(s, pname)
+ }
+ s[len(s)-1] = name
+ e.typename = s
+ return s
+}
+
+// Everything but the last element of the full type name, CamelCased.
+// The values of type Foo.Bar are called Foo_value1... not Foo_Bar_value1... .
+func (e *EnumDescriptor) prefix() string {
+ if e.parent == nil {
+ // If the enum is not part of a message, the prefix is just the type name.
+ return CamelCase(*e.Name) + "_"
+ }
+ typeName := e.TypeName()
+ return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_"
+}
+
+// The integer value of the named constant in this enumerated type.
+func (e *EnumDescriptor) integerValueAsString(name string) string {
+ for _, c := range e.Value {
+ if c.GetName() == name {
+ return fmt.Sprint(c.GetNumber())
+ }
+ }
+ log.Fatal("cannot find value for enum constant")
+ return ""
+}
+
+// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type ExtensionDescriptor struct {
+ common
+ *descriptor.FieldDescriptorProto
+ parent *Descriptor // The containing message, if any.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *ExtensionDescriptor) TypeName() (s []string) {
+ name := e.GetName()
+ if e.parent == nil {
+ // top-level extension
+ s = make([]string, 1)
+ } else {
+ pname := e.parent.TypeName()
+ s = make([]string, len(pname)+1)
+ copy(s, pname)
+ }
+ s[len(s)-1] = name
+ return s
+}
+
+// DescName returns the variable name used for the generated descriptor.
+func (e *ExtensionDescriptor) DescName() string {
+ // The full type name.
+ typeName := e.TypeName()
+ // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix.
+ for i, s := range typeName {
+ typeName[i] = CamelCase(s)
+ }
+ return "E_" + strings.Join(typeName, "_")
+}
+
+// ImportedDescriptor describes a type that has been publicly imported from another file.
+type ImportedDescriptor struct {
+ common
+ o Object
+}
+
+func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() }
+
+// FileDescriptor describes a protocol buffer descriptor file (.proto).
+// It includes slices of all the messages and enums defined within it.
+// Those slices are constructed by WrapTypes.
+type FileDescriptor struct {
+ *descriptor.FileDescriptorProto
+ desc []*Descriptor // All the messages defined in this file.
+ enum []*EnumDescriptor // All the enums defined in this file.
+ ext []*ExtensionDescriptor // All the top-level extensions defined in this file.
+ imp []*ImportedDescriptor // All types defined in files publicly imported by this file.
+
+ // Comments, stored as a map of path (comma-separated integers) to the comment.
+ comments map[string]*descriptor.SourceCodeInfo_Location
+
+ // The full list of symbols that are exported,
+ // as a map from the exported object to its symbols.
+ // This is used for supporting public imports.
+ exported map[Object][]symbol
+
+ importPath GoImportPath // Import path of this file's package.
+ packageName GoPackageName // Name of this file's Go package.
+
+ proto3 bool // whether to generate proto3 code for this file
+}
+
+// VarName is the variable name we'll use in the generated code to refer
+// to the compressed bytes of this descriptor. It is not exported, so
+// it is only valid inside the generated package.
+func (d *FileDescriptor) VarName() string {
+ h := sha256.Sum256([]byte(d.GetName()))
+ return fmt.Sprintf("fileDescriptor_%s", hex.EncodeToString(h[:8]))
+}
+
+// goPackageOption interprets the file's go_package option.
+// If there is no go_package, it returns ("", "", false).
+// If there's a simple name, it returns ("", pkg, true).
+// If the option implies an import path, it returns (impPath, pkg, true).
+func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) {
+ opt := d.GetOptions().GetGoPackage()
+ if opt == "" {
+ return "", "", false
+ }
+ // A semicolon-delimited suffix delimits the import path and package name.
+ sc := strings.Index(opt, ";")
+ if sc >= 0 {
+ return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true
+ }
+ // The presence of a slash implies there's an import path.
+ slash := strings.LastIndex(opt, "/")
+ if slash >= 0 {
+ return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true
+ }
+ return "", cleanPackageName(opt), true
+}
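
A standalone sketch of those three rules, with example values in the comments; splitGoPackage is an illustrative stand-in, and the real method additionally runs the package name through cleanPackageName.

package main

import (
	"fmt"
	"strings"
)

// splitGoPackage mirrors goPackageOption above for illustration.
func splitGoPackage(opt string) (impPath, pkg string, ok bool) {
	if opt == "" {
		return "", "", false // no go_package option
	}
	if sc := strings.Index(opt, ";"); sc >= 0 {
		return opt[:sc], opt[sc+1:], true
	}
	if slash := strings.LastIndex(opt, "/"); slash >= 0 {
		return opt, opt[slash+1:], true
	}
	return "", opt, true
}

func main() {
	fmt.Println(splitGoPackage("example.com/foo/bar;baz")) // example.com/foo/bar baz true
	fmt.Println(splitGoPackage("example.com/foo/bar"))     // example.com/foo/bar bar true
	fmt.Println(splitGoPackage("bar"))                     //  bar true
}
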
+
+// goFileName returns the output name for the generated Go file.
+func (d *FileDescriptor) goFileName(pathType pathType) string {
+ name := *d.Name
+ if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" {
+ name = name[:len(name)-len(ext)]
+ }
+ name += ".pb.go"
+
+ if pathType == pathTypeSourceRelative {
+ return name
+ }
+
+ // Does the file have a "go_package" option?
+ // If it does, it may override the filename.
+ if impPath, _, ok := d.goPackageOption(); ok && impPath != "" {
+ // Replace the existing dirname with the declared import path.
+ _, name = path.Split(name)
+ name = path.Join(string(impPath), name)
+ return name
+ }
+
+ return name
+}
+
+func (d *FileDescriptor) addExport(obj Object, sym symbol) {
+ d.exported[obj] = append(d.exported[obj], sym)
+}
+
+// symbol is an interface representing an exported Go symbol.
+type symbol interface {
+ // GenerateAlias should generate an appropriate alias
+ // for the symbol from the named package.
+ GenerateAlias(g *Generator, filename string, pkg GoPackageName)
+}
+
+type messageSymbol struct {
+ sym string
+ hasExtensions, isMessageSet bool
+ oneofTypes []string
+}
+
+type getterSymbol struct {
+ name string
+ typ string
+ typeName string // canonical name in proto world; empty for proto.Message and similar
+ genType bool // whether typ contains a generated type (message/group/enum)
+}
+
+func (ms *messageSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
+ g.P("// ", ms.sym, " from public import ", filename)
+ g.P("type ", ms.sym, " = ", pkg, ".", ms.sym)
+ for _, name := range ms.oneofTypes {
+ g.P("type ", name, " = ", pkg, ".", name)
+ }
+}
+
+type enumSymbol struct {
+ name string
+ proto3 bool // Whether this came from a proto3 file.
+}
+
+func (es enumSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
+ s := es.name
+ g.P("// ", s, " from public import ", filename)
+ g.P("type ", s, " = ", pkg, ".", s)
+ g.P("var ", s, "_name = ", pkg, ".", s, "_name")
+ g.P("var ", s, "_value = ", pkg, ".", s, "_value")
+}
+
+type constOrVarSymbol struct {
+ sym string
+ typ string // either "const" or "var"
+ cast string // if non-empty, a type cast is required (used for enums)
+}
+
+func (cs constOrVarSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
+ v := string(pkg) + "." + cs.sym
+ if cs.cast != "" {
+ v = cs.cast + "(" + v + ")"
+ }
+ g.P(cs.typ, " ", cs.sym, " = ", v)
+}
+
+// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects.
+type Object interface {
+ GoImportPath() GoImportPath
+ TypeName() []string
+ File() *FileDescriptor
+}
+
+// Generator is the type whose methods generate the output, stored in the associated response structure.
+type Generator struct {
+ *bytes.Buffer
+
+ Request *plugin.CodeGeneratorRequest // The input.
+ Response *plugin.CodeGeneratorResponse // The output.
+
+ Param map[string]string // Command-line parameters.
+ PackageImportPath string // Go import path of the package we're generating code for
+ ImportPrefix string // String to prefix to imported package file names.
+ ImportMap map[string]string // Mapping from .proto file name to import path
+
+ Pkg map[string]string // The names under which we import support packages
+
+ outputImportPath GoImportPath // Package we're generating code for.
+ allFiles []*FileDescriptor // All files in the tree
+ allFilesByName map[string]*FileDescriptor // All files by filename.
+ genFiles []*FileDescriptor // Those files we will generate output for.
+ file *FileDescriptor // The file we are compiling now.
+ packageNames map[GoImportPath]GoPackageName // Imported package names in the current file.
+ usedPackages map[GoImportPath]bool // Packages used in current file.
+ usedPackageNames map[GoPackageName]bool // Package names used in the current file.
+ addedImports map[GoImportPath]bool // Additional imports to emit.
+ typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax.
+ init []string // Lines to emit in the init function.
+ indent string
+ pathType pathType // How to generate output filenames.
+ writeOutput bool
+ annotateCode bool // whether to store annotations
+ annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store
+}
+
+type pathType int
+
+const (
+ pathTypeImport pathType = iota
+ pathTypeSourceRelative
+)
+
+// New creates a new generator and allocates the request and response protobufs.
+func New() *Generator {
+ g := new(Generator)
+ g.Buffer = new(bytes.Buffer)
+ g.Request = new(plugin.CodeGeneratorRequest)
+ g.Response = new(plugin.CodeGeneratorResponse)
+ return g
+}
+
+// Error reports a problem, including an error, and exits the program.
+func (g *Generator) Error(err error, msgs ...string) {
+ s := strings.Join(msgs, " ") + ":" + err.Error()
+ log.Print("protoc-gen-go: error:", s)
+ os.Exit(1)
+}
+
+// Fail reports a problem and exits the program.
+func (g *Generator) Fail(msgs ...string) {
+ s := strings.Join(msgs, " ")
+ log.Print("protoc-gen-go: error:", s)
+ os.Exit(1)
+}
+
+// CommandLineParameters breaks the comma-separated list of key=value pairs
+// in the parameter (a member of the request protobuf) into a key/value map.
+// It then sets file name mappings defined by those entries.
+func (g *Generator) CommandLineParameters(parameter string) {
+ g.Param = make(map[string]string)
+ for _, p := range strings.Split(parameter, ",") {
+ if i := strings.Index(p, "="); i < 0 {
+ g.Param[p] = ""
+ } else {
+ g.Param[p[0:i]] = p[i+1:]
+ }
+ }
+
+ g.ImportMap = make(map[string]string)
+	pluginList := "none" // Default list of plugin names to enable ("none" enables no plugins; an empty value would enable all).
+ for k, v := range g.Param {
+ switch k {
+ case "import_prefix":
+ g.ImportPrefix = v
+ case "import_path":
+ g.PackageImportPath = v
+ case "paths":
+ switch v {
+ case "import":
+ g.pathType = pathTypeImport
+ case "source_relative":
+ g.pathType = pathTypeSourceRelative
+ default:
+ g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v))
+ }
+ case "plugins":
+ pluginList = v
+ case "annotate_code":
+ if v == "true" {
+ g.annotateCode = true
+ }
+ default:
+ if len(k) > 0 && k[0] == 'M' {
+ g.ImportMap[k[1:]] = v
+ }
+ }
+ }
+ if pluginList != "" {
+ // Amend the set of plugins.
+ enabled := make(map[string]bool)
+ for _, name := range strings.Split(pluginList, "+") {
+ enabled[name] = true
+ }
+ var nplugins []Plugin
+ for _, p := range plugins {
+ if enabled[p.Name()] {
+ nplugins = append(nplugins, p)
+ }
+ }
+ plugins = nplugins
+ }
+}
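
For example, given a parameter string as protoc would pass it (the plugin name, paths mode, and mapping below are hypothetical):

package main

import "github.com/golang/protobuf/protoc-gen-go/generator"

func main() {
	g := generator.New()
	// Enable only the "grpc" plugin, emit files next to their sources,
	// and map foo.proto to a specific Go import path.
	g.CommandLineParameters("plugins=grpc,paths=source_relative,Mfoo.proto=example.com/foo")
	_ = g.ImportMap["foo.proto"] // "example.com/foo"
}
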
+
+// DefaultPackageName returns the package name printed for the object.
+// If its file is in a different package, it returns the package name we're using for this file, plus ".".
+// Otherwise it returns the empty string.
+func (g *Generator) DefaultPackageName(obj Object) string {
+ importPath := obj.GoImportPath()
+ if importPath == g.outputImportPath {
+ return ""
+ }
+ return string(g.GoPackageName(importPath)) + "."
+}
+
+// GoPackageName returns the name used for a package.
+func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName {
+ if name, ok := g.packageNames[importPath]; ok {
+ return name
+ }
+ name := cleanPackageName(baseName(string(importPath)))
+ for i, orig := 1, name; g.usedPackageNames[name] || isGoPredeclaredIdentifier[string(name)]; i++ {
+ name = orig + GoPackageName(strconv.Itoa(i))
+ }
+ g.packageNames[importPath] = name
+ g.usedPackageNames[name] = true
+ return name
+}
+
+// AddImport adds a package to the generated file's import section.
+// It returns the name used for the package.
+func (g *Generator) AddImport(importPath GoImportPath) GoPackageName {
+ g.addedImports[importPath] = true
+ return g.GoPackageName(importPath)
+}
+
+var globalPackageNames = map[GoPackageName]bool{
+ "fmt": true,
+ "math": true,
+ "proto": true,
+}
+
+// Create and remember a guaranteed unique package name. Pkg is the candidate name.
+// The FileDescriptor parameter is unused.
+func RegisterUniquePackageName(pkg string, f *FileDescriptor) string {
+ name := cleanPackageName(pkg)
+ for i, orig := 1, name; globalPackageNames[name]; i++ {
+ name = orig + GoPackageName(strconv.Itoa(i))
+ }
+ globalPackageNames[name] = true
+ return string(name)
+}
+
+var isGoKeyword = map[string]bool{
+ "break": true,
+ "case": true,
+ "chan": true,
+ "const": true,
+ "continue": true,
+ "default": true,
+ "else": true,
+ "defer": true,
+ "fallthrough": true,
+ "for": true,
+ "func": true,
+ "go": true,
+ "goto": true,
+ "if": true,
+ "import": true,
+ "interface": true,
+ "map": true,
+ "package": true,
+ "range": true,
+ "return": true,
+ "select": true,
+ "struct": true,
+ "switch": true,
+ "type": true,
+ "var": true,
+}
+
+var isGoPredeclaredIdentifier = map[string]bool{
+ "append": true,
+ "bool": true,
+ "byte": true,
+ "cap": true,
+ "close": true,
+ "complex": true,
+ "complex128": true,
+ "complex64": true,
+ "copy": true,
+ "delete": true,
+ "error": true,
+ "false": true,
+ "float32": true,
+ "float64": true,
+ "imag": true,
+ "int": true,
+ "int16": true,
+ "int32": true,
+ "int64": true,
+ "int8": true,
+ "iota": true,
+ "len": true,
+ "make": true,
+ "new": true,
+ "nil": true,
+ "panic": true,
+ "print": true,
+ "println": true,
+ "real": true,
+ "recover": true,
+ "rune": true,
+ "string": true,
+ "true": true,
+ "uint": true,
+ "uint16": true,
+ "uint32": true,
+ "uint64": true,
+ "uint8": true,
+ "uintptr": true,
+}
+
+func cleanPackageName(name string) GoPackageName {
+ name = strings.Map(badToUnderscore, name)
+ // Identifier must not be keyword or predeclared identifier: insert _.
+ if isGoKeyword[name] {
+ name = "_" + name
+ }
+ // Identifier must not begin with digit: insert _.
+ if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) {
+ name = "_" + name
+ }
+ return GoPackageName(name)
+}
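
A self-contained sketch of the same normalization, approximating badToUnderscore inline (the real helper is defined elsewhere in this file):

package main

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"
)

var goKeywords = map[string]bool{"type": true, "func": true} // abbreviated

// sketchCleanPackageName mirrors cleanPackageName above for illustration.
func sketchCleanPackageName(name string) string {
	// Approximation of badToUnderscore: keep letters, digits, and '_'.
	name = strings.Map(func(r rune) rune {
		if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
			return r
		}
		return '_'
	}, name)
	if goKeywords[name] {
		name = "_" + name // keyword: prefix an underscore
	}
	if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) {
		name = "_" + name // leading digit: prefix an underscore
	}
	return name
}

func main() {
	fmt.Println(sketchCleanPackageName("my-pkg")) // my_pkg
	fmt.Println(sketchCleanPackageName("type"))   // _type
	fmt.Println(sketchCleanPackageName("2fast"))  // _2fast
}
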
+
+// defaultGoPackage returns the package name to use,
+// derived from the import path of the package we're building code for.
+func (g *Generator) defaultGoPackage() GoPackageName {
+ p := g.PackageImportPath
+ if i := strings.LastIndex(p, "/"); i >= 0 {
+ p = p[i+1:]
+ }
+ return cleanPackageName(p)
+}
+
+// SetPackageNames sets the package name for this run.
+// The package name must agree across all files being generated.
+// It also defines unique package names for all imported files.
+func (g *Generator) SetPackageNames() {
+ g.outputImportPath = g.genFiles[0].importPath
+
+ defaultPackageNames := make(map[GoImportPath]GoPackageName)
+ for _, f := range g.genFiles {
+ if _, p, ok := f.goPackageOption(); ok {
+ defaultPackageNames[f.importPath] = p
+ }
+ }
+ for _, f := range g.genFiles {
+ if _, p, ok := f.goPackageOption(); ok {
+ // Source file: option go_package = "quux/bar";
+ f.packageName = p
+ } else if p, ok := defaultPackageNames[f.importPath]; ok {
+ // A go_package option in another file in the same package.
+ //
+ // This is a poor choice in general, since every source file should
+ // contain a go_package option. Supported mainly for historical
+ // compatibility.
+ f.packageName = p
+ } else if p := g.defaultGoPackage(); p != "" {
+ // Command-line: import_path=quux/bar.
+ //
+ // The import_path flag sets a package name for files which don't
+ // contain a go_package option.
+ f.packageName = p
+ } else if p := f.GetPackage(); p != "" {
+ // Source file: package quux.bar;
+ f.packageName = cleanPackageName(p)
+ } else {
+ // Source filename.
+ f.packageName = cleanPackageName(baseName(f.GetName()))
+ }
+ }
+
+ // Check that all files have a consistent package name and import path.
+ for _, f := range g.genFiles[1:] {
+ if a, b := g.genFiles[0].importPath, f.importPath; a != b {
+ g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b))
+ }
+ if a, b := g.genFiles[0].packageName, f.packageName; a != b {
+ g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b))
+ }
+ }
+
+ // Names of support packages. These never vary (if there are conflicts,
+ // we rename the conflicting package), so this could be removed someday.
+ g.Pkg = map[string]string{
+ "fmt": "fmt",
+ "math": "math",
+ "proto": "proto",
+ }
+}
+
+// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos
+// and FileDescriptorProtos into file-referenced objects within the Generator.
+// It also creates the list of files to generate and so should be called before GenerateAllFiles.
+func (g *Generator) WrapTypes() {
+ g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile))
+ g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles))
+ genFileNames := make(map[string]bool)
+ for _, n := range g.Request.FileToGenerate {
+ genFileNames[n] = true
+ }
+ for _, f := range g.Request.ProtoFile {
+ fd := &FileDescriptor{
+ FileDescriptorProto: f,
+ exported: make(map[Object][]symbol),
+ proto3: fileIsProto3(f),
+ }
+ // The import path may be set in a number of ways.
+ if substitution, ok := g.ImportMap[f.GetName()]; ok {
+ // Command-line: M=foo.proto=quux/bar.
+ //
+ // Explicit mapping of source file to import path.
+ fd.importPath = GoImportPath(substitution)
+ } else if genFileNames[f.GetName()] && g.PackageImportPath != "" {
+ // Command-line: import_path=quux/bar.
+ //
+ // The import_path flag sets the import path for every file that
+ // we generate code for.
+ fd.importPath = GoImportPath(g.PackageImportPath)
+ } else if p, _, _ := fd.goPackageOption(); p != "" {
+ // Source file: option go_package = "quux/bar";
+ //
+ // The go_package option sets the import path. Most users should use this.
+ fd.importPath = p
+ } else {
+ // Source filename.
+ //
+ // Last resort when nothing else is available.
+ fd.importPath = GoImportPath(path.Dir(f.GetName()))
+ }
+ // We must wrap the descriptors before we wrap the enums
+ fd.desc = wrapDescriptors(fd)
+ g.buildNestedDescriptors(fd.desc)
+ fd.enum = wrapEnumDescriptors(fd, fd.desc)
+ g.buildNestedEnums(fd.desc, fd.enum)
+ fd.ext = wrapExtensions(fd)
+ extractComments(fd)
+ g.allFiles = append(g.allFiles, fd)
+ g.allFilesByName[f.GetName()] = fd
+ }
+ for _, fd := range g.allFiles {
+ fd.imp = wrapImported(fd, g)
+ }
+
+ g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate))
+ for _, fileName := range g.Request.FileToGenerate {
+ fd := g.allFilesByName[fileName]
+ if fd == nil {
+ g.Fail("could not find file named", fileName)
+ }
+ g.genFiles = append(g.genFiles, fd)
+ }
+}
+
+// Scan the descriptors in this file. For each one, build the slice of nested descriptors
+func (g *Generator) buildNestedDescriptors(descs []*Descriptor) {
+ for _, desc := range descs {
+ if len(desc.NestedType) != 0 {
+ for _, nest := range descs {
+ if nest.parent == desc {
+ desc.nested = append(desc.nested, nest)
+ }
+ }
+ if len(desc.nested) != len(desc.NestedType) {
+ g.Fail("internal error: nesting failure for", desc.GetName())
+ }
+ }
+ }
+}
+
+func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) {
+ for _, desc := range descs {
+ if len(desc.EnumType) != 0 {
+ for _, enum := range enums {
+ if enum.parent == desc {
+ desc.enums = append(desc.enums, enum)
+ }
+ }
+ if len(desc.enums) != len(desc.EnumType) {
+ g.Fail("internal error: enum nesting failure for", desc.GetName())
+ }
+ }
+ }
+}
+
+// Construct the Descriptor
+func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor {
+ d := &Descriptor{
+ common: common{file},
+ DescriptorProto: desc,
+ parent: parent,
+ index: index,
+ }
+ if parent == nil {
+ d.path = fmt.Sprintf("%d,%d", messagePath, index)
+ } else {
+ d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index)
+ }
+
+ // The only way to distinguish a group from a message is whether
+ // the containing message has a TYPE_GROUP field that matches.
+ if parent != nil {
+ parts := d.TypeName()
+ if file.Package != nil {
+ parts = append([]string{*file.Package}, parts...)
+ }
+ exp := "." + strings.Join(parts, ".")
+ for _, field := range parent.Field {
+ if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp {
+ d.group = true
+ break
+ }
+ }
+ }
+
+ for _, field := range desc.Extension {
+ d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d})
+ }
+
+ return d
+}
+
+// Return a slice of all the Descriptors defined within this file
+func wrapDescriptors(file *FileDescriptor) []*Descriptor {
+ sl := make([]*Descriptor, 0, len(file.MessageType)+10)
+ for i, desc := range file.MessageType {
+ sl = wrapThisDescriptor(sl, desc, nil, file, i)
+ }
+ return sl
+}
+
+// Wrap this Descriptor, recursively
+func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor {
+ sl = append(sl, newDescriptor(desc, parent, file, index))
+ me := sl[len(sl)-1]
+ for i, nested := range desc.NestedType {
+ sl = wrapThisDescriptor(sl, nested, me, file, i)
+ }
+ return sl
+}
+
+// Construct the EnumDescriptor
+func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor {
+ ed := &EnumDescriptor{
+ common: common{file},
+ EnumDescriptorProto: desc,
+ parent: parent,
+ index: index,
+ }
+ if parent == nil {
+ ed.path = fmt.Sprintf("%d,%d", enumPath, index)
+ } else {
+ ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index)
+ }
+ return ed
+}
+
+// Return a slice of all the EnumDescriptors defined within this file
+func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor {
+ sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10)
+ // Top-level enums.
+ for i, enum := range file.EnumType {
+ sl = append(sl, newEnumDescriptor(enum, nil, file, i))
+ }
+ // Enums within messages. Enums within embedded messages appear in the outer-most message.
+ for _, nested := range descs {
+ for i, enum := range nested.EnumType {
+ sl = append(sl, newEnumDescriptor(enum, nested, file, i))
+ }
+ }
+ return sl
+}
+
+// Return a slice of all the top-level ExtensionDescriptors defined within this file.
+func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor {
+ var sl []*ExtensionDescriptor
+ for _, field := range file.Extension {
+ sl = append(sl, &ExtensionDescriptor{common{file}, field, nil})
+ }
+ return sl
+}
+
+// Return a slice of all the types that are publicly imported into this file.
+func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) {
+ for _, index := range file.PublicDependency {
+ df := g.fileByName(file.Dependency[index])
+ for _, d := range df.desc {
+ if d.GetOptions().GetMapEntry() {
+ continue
+ }
+ sl = append(sl, &ImportedDescriptor{common{file}, d})
+ }
+ for _, e := range df.enum {
+ sl = append(sl, &ImportedDescriptor{common{file}, e})
+ }
+ for _, ext := range df.ext {
+ sl = append(sl, &ImportedDescriptor{common{file}, ext})
+ }
+ }
+ return
+}
+
+func extractComments(file *FileDescriptor) {
+ file.comments = make(map[string]*descriptor.SourceCodeInfo_Location)
+ for _, loc := range file.GetSourceCodeInfo().GetLocation() {
+ if loc.LeadingComments == nil {
+ continue
+ }
+ var p []string
+ for _, n := range loc.Path {
+ p = append(p, strconv.Itoa(int(n)))
+ }
+ file.comments[strings.Join(p, ",")] = loc
+ }
+}
+
+// BuildTypeNameMap builds the map from fully qualified type names to objects.
+// The key names for the map come from the input data, which puts a period at the beginning.
+// It should be called after SetPackageNames and before GenerateAllFiles.
+func (g *Generator) BuildTypeNameMap() {
+ g.typeNameToObject = make(map[string]Object)
+ for _, f := range g.allFiles {
+ // The names in this loop are defined by the proto world, not us, so the
+ // package name may be empty. If so, the dotted package name of X will
+ // be ".X"; otherwise it will be ".pkg.X".
+ dottedPkg := "." + f.GetPackage()
+ if dottedPkg != "." {
+ dottedPkg += "."
+ }
+ for _, enum := range f.enum {
+ name := dottedPkg + dottedSlice(enum.TypeName())
+ g.typeNameToObject[name] = enum
+ }
+ for _, desc := range f.desc {
+ name := dottedPkg + dottedSlice(desc.TypeName())
+ g.typeNameToObject[name] = desc
+ }
+ }
+}
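
Concretely, for a file declaring `package quux.bar;` with a message Baz and a nested enum Baz.Kind, lookups would use keys like the following (the names are hypothetical):

package myplugin

import "github.com/golang/protobuf/protoc-gen-go/generator"

func lookup(g *generator.Generator) {
	msg := g.ObjectNamed(".quux.bar.Baz")       // *Descriptor for Baz
	enum := g.ObjectNamed(".quux.bar.Baz.Kind") // *EnumDescriptor for Kind
	// With no package statement the keys would be ".Baz" and ".Baz.Kind".
	_, _ = msg, enum
}
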
+
+// ObjectNamed, given a fully-qualified input type name as it appears in the input data,
+// returns the descriptor for the message or enum with that name.
+func (g *Generator) ObjectNamed(typeName string) Object {
+ o, ok := g.typeNameToObject[typeName]
+ if !ok {
+ g.Fail("can't find object with type", typeName)
+ }
+ return o
+}
+
+// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated.
+type AnnotatedAtoms struct {
+ source string
+ path string
+ atoms []interface{}
+}
+
+// Annotate records the file name and proto AST path of a list of atoms
+// so that a later call to P can emit a link from each atom to its origin.
+func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms {
+ return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms}
+}
+
+// printAtom prints the (atomic, non-annotation) argument to the generated output.
+func (g *Generator) printAtom(v interface{}) {
+ switch v := v.(type) {
+ case string:
+ g.WriteString(v)
+ case *string:
+ g.WriteString(*v)
+ case bool:
+ fmt.Fprint(g, v)
+ case *bool:
+ fmt.Fprint(g, *v)
+ case int:
+ fmt.Fprint(g, v)
+ case *int32:
+ fmt.Fprint(g, *v)
+ case *int64:
+ fmt.Fprint(g, *v)
+ case float64:
+ fmt.Fprint(g, v)
+ case *float64:
+ fmt.Fprint(g, *v)
+ case GoPackageName:
+ g.WriteString(string(v))
+ case GoImportPath:
+ g.WriteString(strconv.Quote(string(v)))
+ default:
+ g.Fail(fmt.Sprintf("unknown type in printer: %T", v))
+ }
+}
+
+// P prints the arguments to the generated output. It handles strings and int32s, and
+// handles indirection (arguments may be *string, etc.). Any inputs of type AnnotatedAtoms may emit
+// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode
+// is true).
+func (g *Generator) P(str ...interface{}) {
+ if !g.writeOutput {
+ return
+ }
+ g.WriteString(g.indent)
+ for _, v := range str {
+ switch v := v.(type) {
+ case *AnnotatedAtoms:
+ begin := int32(g.Len())
+ for _, v := range v.atoms {
+ g.printAtom(v)
+ }
+ if g.annotateCode {
+ end := int32(g.Len())
+ var path []int32
+ for _, token := range strings.Split(v.path, ",") {
+ val, err := strconv.ParseInt(token, 10, 32)
+ if err != nil {
+ g.Fail("could not parse proto AST path: ", err.Error())
+ }
+ path = append(path, int32(val))
+ }
+ g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{
+ Path: path,
+ SourceFile: &v.source,
+ Begin: &begin,
+ End: &end,
+ })
+ }
+ default:
+ g.printAtom(v)
+ }
+ }
+ g.WriteByte('\n')
+}
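
Typical use from a plugin's Generate method, as a sketch; the SourceCodeInfo path "4,0" here is hypothetical (first message in the file):

package myplugin

import "github.com/golang/protobuf/protoc-gen-go/generator"

func emitStruct(g *generator.Generator, file *generator.FileDescriptor) {
	g.P("type ", generator.Annotate(file, "4,0", "Foo"), " struct {")
	g.In() // indent subsequent lines one tab stop
	g.P("Name string")
	g.Out()
	g.P("}")
}
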
+
+// addInitf stores the given statement to be printed inside the file's init function.
+// The statement is given as a format specifier and arguments.
+func (g *Generator) addInitf(stmt string, a ...interface{}) {
+ g.init = append(g.init, fmt.Sprintf(stmt, a...))
+}
+
+// In indents the output one tab stop.
+func (g *Generator) In() { g.indent += "\t" }
+
+// Out unindents the output one tab stop.
+func (g *Generator) Out() {
+ if len(g.indent) > 0 {
+ g.indent = g.indent[1:]
+ }
+}
+
+// GenerateAllFiles generates the output for all the files we're outputting.
+func (g *Generator) GenerateAllFiles() {
+ // Initialize the plugins
+ for _, p := range plugins {
+ p.Init(g)
+ }
+ // Generate the output. The generator runs for every file, even the files
+ // that we don't generate output for, so that we can collate the full list
+ // of exported symbols to support public imports.
+ genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
+ for _, file := range g.genFiles {
+ genFileMap[file] = true
+ }
+ for _, file := range g.allFiles {
+ g.Reset()
+ g.annotations = nil
+ g.writeOutput = genFileMap[file]
+ g.generate(file)
+ if !g.writeOutput {
+ continue
+ }
+ fname := file.goFileName(g.pathType)
+ g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+ Name: proto.String(fname),
+ Content: proto.String(g.String()),
+ })
+ if g.annotateCode {
+ // Store the generated code annotations in text, as the protoc plugin protocol requires that
+ // strings contain valid UTF-8.
+ g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+ Name: proto.String(file.goFileName(g.pathType) + ".meta"),
+ Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})),
+ })
+ }
+ }
+}
+
+// Run all the plugins associated with the file.
+func (g *Generator) runPlugins(file *FileDescriptor) {
+ for _, p := range plugins {
+ p.Generate(file)
+ }
+}
+
+// Fill the response protocol buffer with the generated output for all the files we're
+// supposed to generate.
+func (g *Generator) generate(file *FileDescriptor) {
+ g.file = file
+ g.usedPackages = make(map[GoImportPath]bool)
+ g.packageNames = make(map[GoImportPath]GoPackageName)
+ g.usedPackageNames = make(map[GoPackageName]bool)
+ g.addedImports = make(map[GoImportPath]bool)
+ for name := range globalPackageNames {
+ g.usedPackageNames[name] = true
+ }
+
+ g.P("// This is a compile-time assertion to ensure that this generated file")
+ g.P("// is compatible with the proto package it is being compiled against.")
+ g.P("// A compilation error at this line likely means your copy of the")
+ g.P("// proto package needs to be updated.")
+ g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
+ g.P()
+
+ for _, td := range g.file.imp {
+ g.generateImported(td)
+ }
+ for _, enum := range g.file.enum {
+ g.generateEnum(enum)
+ }
+ for _, desc := range g.file.desc {
+ // Don't generate virtual messages for maps.
+ if desc.GetOptions().GetMapEntry() {
+ continue
+ }
+ g.generateMessage(desc)
+ }
+ for _, ext := range g.file.ext {
+ g.generateExtension(ext)
+ }
+ g.generateInitFunction()
+ g.generateFileDescriptor(file)
+
+ // Run the plugins before the imports so we know which imports are necessary.
+ g.runPlugins(file)
+
+ // Generate header and imports last, though they appear first in the output.
+ rem := g.Buffer
+ remAnno := g.annotations
+ g.Buffer = new(bytes.Buffer)
+ g.annotations = nil
+ g.generateHeader()
+ g.generateImports()
+ if !g.writeOutput {
+ return
+ }
+ // Adjust the offsets for annotations displaced by the header and imports.
+ for _, anno := range remAnno {
+ *anno.Begin += int32(g.Len())
+ *anno.End += int32(g.Len())
+ g.annotations = append(g.annotations, anno)
+ }
+ g.Write(rem.Bytes())
+
+ // Reformat generated code and patch annotation locations.
+ fset := token.NewFileSet()
+ original := g.Bytes()
+ if g.annotateCode {
+ // make a copy independent of g; we'll need it after Reset.
+ original = append([]byte(nil), original...)
+ }
+ fileAST, err := parser.ParseFile(fset, "", original, parser.ParseComments)
+ if err != nil {
+ // Print out the bad code with line numbers.
+ // This should never happen in practice, but it can while changing generated code,
+ // so consider this a debugging aid.
+ var src bytes.Buffer
+ s := bufio.NewScanner(bytes.NewReader(original))
+ for line := 1; s.Scan(); line++ {
+ fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes())
+ }
+ g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String())
+ }
+ ast.SortImports(fset, fileAST)
+ g.Reset()
+ err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, fileAST)
+ if err != nil {
+ g.Fail("generated Go source code could not be reformatted:", err.Error())
+ }
+ if g.annotateCode {
+ m, err := remap.Compute(original, g.Bytes())
+ if err != nil {
+ g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error())
+ }
+ for _, anno := range g.annotations {
+ new, ok := m.Find(int(*anno.Begin), int(*anno.End))
+ if !ok {
+ g.Fail("span in formatted generated Go source code could not be mapped back to the original code")
+ }
+ *anno.Begin = int32(new.Pos)
+ *anno.End = int32(new.End)
+ }
+ }
+}
+
+// Generate the header, including package definition
+func (g *Generator) generateHeader() {
+ g.P("// Code generated by protoc-gen-go. DO NOT EDIT.")
+ if g.file.GetOptions().GetDeprecated() {
+ g.P("// ", g.file.Name, " is a deprecated file.")
+ } else {
+ g.P("// source: ", g.file.Name)
+ }
+ g.P()
+ g.PrintComments(strconv.Itoa(packagePath))
+ g.P()
+ g.P("package ", g.file.packageName)
+ g.P()
+}
+
+// deprecationComment is the standard comment added to deprecated
+// messages, fields, enums, and enum values.
+var deprecationComment = "// Deprecated: Do not use."
+
+// PrintComments prints any comments from the source .proto file.
+// The path is a comma-separated list of integers.
+// It returns an indication of whether any comments were printed.
+// See descriptor.proto for its format.
+func (g *Generator) PrintComments(path string) bool {
+ if !g.writeOutput {
+ return false
+ }
+ if c, ok := g.makeComments(path); ok {
+ g.P(c)
+ return true
+ }
+ return false
+}
+
+// makeComments generates the comment string for the field, no "\n" at the end
+func (g *Generator) makeComments(path string) (string, bool) {
+ loc, ok := g.file.comments[path]
+ if !ok {
+ return "", false
+ }
+ w := new(bytes.Buffer)
+ nl := ""
+ for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") {
+ fmt.Fprintf(w, "%s//%s", nl, line)
+ nl = "\n"
+ }
+ return w.String(), true
+}
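
Paths alternate field numbers and indexes, per descriptor.proto's SourceCodeInfo. Assuming the usual descriptor field numbers (message_type is field 4 of FileDescriptorProto, field is field 2 of DescriptorProto), a sketch:

package myplugin

import "github.com/golang/protobuf/protoc-gen-go/generator"

func emitFieldComment(g *generator.Generator) {
	// "4,0,2,1" addresses: message_type (4), first message (0),
	// field (2), second field (1). Returns true only if that field
	// carries a leading comment in the .proto source.
	printed := g.PrintComments("4,0,2,1")
	_ = printed
}
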
+
+func (g *Generator) fileByName(filename string) *FileDescriptor {
+ return g.allFilesByName[filename]
+}
+
+// weak returns whether the ith import of the current file is a weak import.
+func (g *Generator) weak(i int32) bool {
+ for _, j := range g.file.WeakDependency {
+ if j == i {
+ return true
+ }
+ }
+ return false
+}
+
+// Generate the imports
+func (g *Generator) generateImports() {
+ imports := make(map[GoImportPath]GoPackageName)
+ for i, s := range g.file.Dependency {
+ fd := g.fileByName(s)
+ importPath := fd.importPath
+ // Do not import our own package.
+ if importPath == g.file.importPath {
+ continue
+ }
+ // Do not import weak imports.
+ if g.weak(int32(i)) {
+ continue
+ }
+ // Do not import a package twice.
+ if _, ok := imports[importPath]; ok {
+ continue
+ }
+ // We need to import all the dependencies, even if we don't reference them,
+ // because other code and tools depend on having the full transitive closure
+ // of protocol buffer types in the binary.
+ packageName := g.GoPackageName(importPath)
+ if _, ok := g.usedPackages[importPath]; !ok {
+ packageName = "_"
+ }
+ imports[importPath] = packageName
+ }
+ for importPath := range g.addedImports {
+ imports[importPath] = g.GoPackageName(importPath)
+ }
+ // We almost always need a proto import. Rather than computing when we
+ // do, which is tricky when there's a plugin, just import it and
+ // reference it later. The same argument applies to the fmt and math packages.
+ g.P("import (")
+ g.P(g.Pkg["fmt"] + ` "fmt"`)
+ g.P(g.Pkg["math"] + ` "math"`)
+ g.P(g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto")
+ for importPath, packageName := range imports {
+ g.P(packageName, " ", GoImportPath(g.ImportPrefix)+importPath)
+ }
+ g.P(")")
+ g.P()
+ // TODO: may need to worry about uniqueness across plugins
+ for _, p := range plugins {
+ p.GenerateImports(g.file)
+ g.P()
+ }
+ g.P("// Reference imports to suppress errors if they are not otherwise used.")
+ g.P("var _ = ", g.Pkg["proto"], ".Marshal")
+ g.P("var _ = ", g.Pkg["fmt"], ".Errorf")
+ g.P("var _ = ", g.Pkg["math"], ".Inf")
+ g.P()
+}
+
+func (g *Generator) generateImported(id *ImportedDescriptor) {
+ df := id.o.File()
+ filename := *df.Name
+ if df.importPath == g.file.importPath {
+ // Don't generate type aliases for files in the same Go package as this one.
+ return
+ }
+ if !supportTypeAliases {
+ g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename))
+ }
+ g.usedPackages[df.importPath] = true
+
+ for _, sym := range df.exported[id.o] {
+ sym.GenerateAlias(g, filename, g.GoPackageName(df.importPath))
+ }
+
+ g.P()
+}
+
+// Generate the enum definitions for this EnumDescriptor.
+func (g *Generator) generateEnum(enum *EnumDescriptor) {
+ // The full type name
+ typeName := enum.TypeName()
+ // The full type name, CamelCased.
+ ccTypeName := CamelCaseSlice(typeName)
+ ccPrefix := enum.prefix()
+
+ deprecatedEnum := ""
+ if enum.GetOptions().GetDeprecated() {
+ deprecatedEnum = deprecationComment
+ }
+ g.PrintComments(enum.path)
+ g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum)
+ g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()})
+ g.P("const (")
+ for i, e := range enum.Value {
+ etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)
+ g.PrintComments(etorPath)
+
+ deprecatedValue := ""
+ if e.GetOptions().GetDeprecated() {
+ deprecatedValue = deprecationComment
+ }
+
+ name := ccPrefix + *e.Name
+ g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue)
+ g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName})
+ }
+ g.P(")")
+ g.P()
+ g.P("var ", ccTypeName, "_name = map[int32]string{")
+ generated := make(map[int32]bool) // avoid duplicate values
+ for _, e := range enum.Value {
+ duplicate := ""
+ if _, present := generated[*e.Number]; present {
+ duplicate = "// Duplicate value: "
+ }
+ g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",")
+ generated[*e.Number] = true
+ }
+ g.P("}")
+ g.P()
+ g.P("var ", ccTypeName, "_value = map[string]int32{")
+ for _, e := range enum.Value {
+ g.P(strconv.Quote(*e.Name), ": ", e.Number, ",")
+ }
+ g.P("}")
+ g.P()
+
+ if !enum.proto3() {
+ g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {")
+ g.P("p := new(", ccTypeName, ")")
+ g.P("*p = x")
+ g.P("return p")
+ g.P("}")
+ g.P()
+ }
+
+ g.P("func (x ", ccTypeName, ") String() string {")
+ g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))")
+ g.P("}")
+ g.P()
+
+ if !enum.proto3() {
+ g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {")
+ g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`)
+ g.P("if err != nil {")
+ g.P("return err")
+ g.P("}")
+ g.P("*x = ", ccTypeName, "(value)")
+ g.P("return nil")
+ g.P("}")
+ g.P()
+ }
+
+ var indexes []string
+ for m := enum.parent; m != nil; m = m.parent {
+ // XXX: skip groups?
+ indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
+ }
+ indexes = append(indexes, strconv.Itoa(enum.index))
+ g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {")
+ g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
+ g.P("}")
+ g.P()
+ if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" {
+ g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`)
+ g.P()
+ }
+
+ g.generateEnumRegistration(enum)
+}
+
+// The tag is a string like "varint,2,opt,name=fieldname,def=7" that
+// identifies details of the field for the protocol buffer marshaling and unmarshaling
+// code. The fields are:
+// wire encoding
+// protocol tag number
+// opt,req,rep for optional, required, or repeated
+// packed whether the encoding is "packed" (optional; repeated primitives only)
+// name= the original declared name
+// enum= the name of the enum type if it is an enum-typed field.
+// proto3 if this field is in a proto3 message
+// def= string representation of the default value, if any.
+// The default value must be in a representation that can be used at run-time
+// to generate the default value. Thus bools become 0 and 1, for instance.
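+//
+// For example (an editorial illustration, not generated output): a proto2
+// field `optional int32 foo = 3 [default=7];` uses the varint wire encoding,
+// so its tag would be "varint,3,opt,name=foo,def=7".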
+func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string {
+ optrepreq := ""
+ switch {
+ case isOptional(field):
+ optrepreq = "opt"
+ case isRequired(field):
+ optrepreq = "req"
+ case isRepeated(field):
+ optrepreq = "rep"
+ }
+ var defaultValue string
+ if dv := field.DefaultValue; dv != nil { // set means an explicit default
+ defaultValue = *dv
+ // Some types need tweaking.
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ if defaultValue == "true" {
+ defaultValue = "1"
+ } else {
+ defaultValue = "0"
+ }
+ case descriptor.FieldDescriptorProto_TYPE_STRING,
+ descriptor.FieldDescriptorProto_TYPE_BYTES:
+ // Nothing to do. Quoting is done for the whole tag.
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ // For enums we need to provide the integer constant.
+ obj := g.ObjectNamed(field.GetTypeName())
+ if id, ok := obj.(*ImportedDescriptor); ok {
+ // It is an enum that was publicly imported.
+ // We need the underlying type.
+ obj = id.o
+ }
+ enum, ok := obj.(*EnumDescriptor)
+ if !ok {
+ log.Printf("obj is a %T", obj)
+ if id, ok := obj.(*ImportedDescriptor); ok {
+ log.Printf("id.o is a %T", id.o)
+ }
+ g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName()))
+ }
+ defaultValue = enum.integerValueAsString(defaultValue)
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
+ if f, err := strconv.ParseFloat(defaultValue, 32); err == nil {
+ defaultValue = fmt.Sprint(float32(f))
+ }
+ }
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
+ if f, err := strconv.ParseFloat(defaultValue, 64); err == nil {
+ defaultValue = fmt.Sprint(f)
+ }
+ }
+ }
+ defaultValue = ",def=" + defaultValue
+ }
+ enum := ""
+ if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM {
+ // We avoid using obj.GoPackageName(), because we want to use the
+ // original (proto-world) package name.
+ obj := g.ObjectNamed(field.GetTypeName())
+ if id, ok := obj.(*ImportedDescriptor); ok {
+ obj = id.o
+ }
+ enum = ",enum="
+ if pkg := obj.File().GetPackage(); pkg != "" {
+ enum += pkg + "."
+ }
+ enum += CamelCaseSlice(obj.TypeName())
+ }
+ packed := ""
+ if (field.Options != nil && field.Options.GetPacked()) ||
+ // Per https://developers.google.com/protocol-buffers/docs/proto3#simple:
+ // "In proto3, repeated fields of scalar numeric types use packed encoding by default."
+ (message.proto3() && (field.Options == nil || field.Options.Packed == nil) &&
+ isRepeated(field) && isScalar(field)) {
+ packed = ",packed"
+ }
+ fieldName := field.GetName()
+ name := fieldName
+ if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP {
+ // We must use the type name for groups instead of
+ // the field name to preserve capitalization.
+ // type_name in FieldDescriptorProto is fully-qualified,
+ // but we only want the local part.
+ name = *field.TypeName
+ if i := strings.LastIndex(name, "."); i >= 0 {
+ name = name[i+1:]
+ }
+ }
+ if json := field.GetJsonName(); field.Extendee == nil && json != "" && json != name {
+ // TODO: escaping might be needed, in which case
+ // perhaps this should be in its own "json" tag.
+ name += ",json=" + json
+ }
+ name = ",name=" + name
+ if message.proto3() {
+ name += ",proto3"
+ }
+ oneof := ""
+ if field.OneofIndex != nil {
+ oneof = ",oneof"
+ }
+ return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s",
+ wiretype,
+ field.GetNumber(),
+ optrepreq,
+ packed,
+ name,
+ enum,
+ oneof,
+ defaultValue))
+}
+
+func needsStar(typ descriptor.FieldDescriptorProto_Type) bool {
+ switch typ {
+ case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ return false
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ return false
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ return false
+ }
+ return true
+}
+
+// TypeName is the printed name appropriate for an item. If the object is in the current file,
+// TypeName drops the package name and underscores the rest.
+// Otherwise the object is from another package, and the result is the underscored
+// package name followed by the item name.
+// The result always has an initial capital.
+func (g *Generator) TypeName(obj Object) string {
+ return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName())
+}
+
+// GoType returns a string representing the type name, and the wire type
+func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) {
+ // TODO: Options.
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ typ, wire = "float64", "fixed64"
+ case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ typ, wire = "float32", "fixed32"
+ case descriptor.FieldDescriptorProto_TYPE_INT64:
+ typ, wire = "int64", "varint"
+ case descriptor.FieldDescriptorProto_TYPE_UINT64:
+ typ, wire = "uint64", "varint"
+ case descriptor.FieldDescriptorProto_TYPE_INT32:
+ typ, wire = "int32", "varint"
+ case descriptor.FieldDescriptorProto_TYPE_UINT32:
+ typ, wire = "uint32", "varint"
+ case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ typ, wire = "uint64", "fixed64"
+ case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ typ, wire = "uint32", "fixed32"
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ typ, wire = "bool", "varint"
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ typ, wire = "string", "bytes"
+ case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ desc := g.ObjectNamed(field.GetTypeName())
+ typ, wire = "*"+g.TypeName(desc), "group"
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ desc := g.ObjectNamed(field.GetTypeName())
+ typ, wire = "*"+g.TypeName(desc), "bytes"
+ case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ typ, wire = "[]byte", "bytes"
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ desc := g.ObjectNamed(field.GetTypeName())
+ typ, wire = g.TypeName(desc), "varint"
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ typ, wire = "int32", "fixed32"
+ case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ typ, wire = "int64", "fixed64"
+ case descriptor.FieldDescriptorProto_TYPE_SINT32:
+ typ, wire = "int32", "zigzag32"
+ case descriptor.FieldDescriptorProto_TYPE_SINT64:
+ typ, wire = "int64", "zigzag64"
+ default:
+ g.Fail("unknown type for", field.GetName())
+ }
+ if isRepeated(field) {
+ typ = "[]" + typ
+ } else if message != nil && message.proto3() {
+ return
+ } else if field.OneofIndex != nil && message != nil {
+ return
+ } else if needsStar(*field.Type) {
+ typ = "*" + typ
+ }
+ return
+}
+
+func (g *Generator) RecordTypeUse(t string) {
+ if _, ok := g.typeNameToObject[t]; !ok {
+ return
+ }
+ importPath := g.ObjectNamed(t).GoImportPath()
+ if importPath == g.outputImportPath {
+ // Don't record use of objects in our package.
+ return
+ }
+ g.AddImport(importPath)
+ g.usedPackages[importPath] = true
+}
+
+// Method names that may be generated. Fields with these names get an
+// underscore appended. Any change to this set is a potential incompatible
+// API change because it changes generated field names.
+var methodNames = [...]string{
+ "Reset",
+ "String",
+ "ProtoMessage",
+ "Marshal",
+ "Unmarshal",
+ "ExtensionRangeArray",
+ "ExtensionMap",
+ "Descriptor",
+}
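+
+// For example (editorial note), a proto field named "descriptor" would
+// collide with the generated Descriptor method, so the field and its getter
+// are emitted as "Descriptor_" and "GetDescriptor_" instead.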
+
+// Names of messages in the `google.protobuf` package for which
+// we will generate XXX_WellKnownType methods.
+var wellKnownTypes = map[string]bool{
+ "Any": true,
+ "Duration": true,
+ "Empty": true,
+ "Struct": true,
+ "Timestamp": true,
+
+ "Value": true,
+ "ListValue": true,
+ "DoubleValue": true,
+ "FloatValue": true,
+ "Int64Value": true,
+ "UInt64Value": true,
+ "Int32Value": true,
+ "UInt32Value": true,
+ "BoolValue": true,
+ "StringValue": true,
+ "BytesValue": true,
+}
+
+// getterDefault finds the default value for the field to return from a getter,
+// regardless of whether it's a built-in default or one stated explicitly in
+// the source. It returns e.g. "nil", `""`, or "Default_MessageType_FieldName".
+func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string {
+ if isRepeated(field) {
+ return "nil"
+ }
+ if def := field.GetDefaultValue(); def != "" {
+ defaultConstant := g.defaultConstantName(goMessageType, field.GetName())
+ if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES {
+ return defaultConstant
+ }
+ return "append([]byte(nil), " + defaultConstant + "...)"
+ }
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ return "false"
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ return `""`
+ case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES:
+ return "nil"
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ obj := g.ObjectNamed(field.GetTypeName())
+ var enum *EnumDescriptor
+ if id, ok := obj.(*ImportedDescriptor); ok {
+ // The enum type has been publicly imported.
+ enum, _ = id.o.(*EnumDescriptor)
+ } else {
+ enum, _ = obj.(*EnumDescriptor)
+ }
+ if enum == nil {
+ log.Printf("don't know how to generate getter for %s", field.GetName())
+ return "nil"
+ }
+ if len(enum.Value) == 0 {
+ return "0 // empty enum"
+ }
+ first := enum.Value[0].GetName()
+ return g.DefaultPackageName(obj) + enum.prefix() + first
+ default:
+ return "0"
+ }
+}
+
+// defaultConstantName builds the name of the default constant from the message
+// type name and the untouched field name, e.g. "Default_MessageType_FieldName"
+func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string {
+ return "Default_" + goMessageType + "_" + CamelCase(protoFieldName)
+}
+
+// The different types of fields in a message and how to actually print them.
+// Most of the logic for generateMessage is in the methods of these types.
+//
+// Note that the content of the field is irrelevant, a simpleField can contain
+// anything from a scalar to a group (which is just a message).
+//
+// Extension fields (and message sets) are however handled separately.
+//
+// simpleField - a field that is neither weak nor oneof, possibly repeated
+// oneofField - field containing list of subfields:
+// - oneofSubField - a field within the oneof
+
+// msgCtx contains the context for the generator functions.
+type msgCtx struct {
+ goName string // Go struct name of the message, e.g. MessageName
+ message *Descriptor // The descriptor for the message
+}
+
+// fieldCommon contains data common to all types of fields.
+type fieldCommon struct {
+ goName string // Go name of field, e.g. "FieldName" or "Descriptor_"
+ protoName string // Name of field in proto language, e.g. "field_name" or "descriptor"
+ getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_"
+ goType string // The Go type as a string, e.g. "*int32" or "*OtherMessage"
+ tags string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"`
+ fullPath string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0"
+}
+
+// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor".
+func (f *fieldCommon) getProtoName() string {
+ return f.protoName
+}
+
+// getGoType returns the go type of the field as a string, e.g. "*int32".
+func (f *fieldCommon) getGoType() string {
+ return f.goType
+}
+
+// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated.
+type simpleField struct {
+ fieldCommon
+ protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+ protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+ deprecated string // Deprecation comment, if any, e.g. "// Deprecated: Do not use."
+ getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+ protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5"
+ comment string // The full comment for the field, e.g. "// Useful information"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *simpleField) decl(g *Generator, mc *msgCtx) {
+ g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated)
+}
+
+// getter prints the getter for the field.
+func (f *simpleField) getter(g *Generator, mc *msgCtx) {
+ star := ""
+ tname := f.goType
+ if needsStar(f.protoType) && tname[0] == '*' {
+ tname = tname[1:]
+ star = "*"
+ }
+ if f.deprecated != "" {
+ g.P(f.deprecated)
+ }
+ g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {")
+ if f.getterDef == "nil" { // Simpler getter
+ g.P("if m != nil {")
+ g.P("return m." + f.goName)
+ g.P("}")
+ g.P("return nil")
+ g.P("}")
+ g.P()
+ return
+ }
+ if mc.message.proto3() {
+ g.P("if m != nil {")
+ } else {
+ g.P("if m != nil && m." + f.goName + " != nil {")
+ }
+ g.P("return " + star + "m." + f.goName)
+ g.P("}")
+ g.P("return ", f.getterDef)
+ g.P("}")
+ g.P()
+}
+
+// setter prints the setter method of the field.
+func (f *simpleField) setter(g *Generator, mc *msgCtx) {
+ // No setter for regular fields yet
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5".
+func (f *simpleField) getProtoDef() string {
+ return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *simpleField) getProtoTypeName() string {
+ return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type {
+ return f.protoType
+}
+
+// oneofSubFields are kept in a slice held by each oneofField. They do not appear in the top-level slice of fields for the message.
+type oneofSubField struct {
+ fieldCommon
+ protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+ protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+ oneofTypeName string // Type name of the enclosing struct, e.g. "MessageName_FieldName"
+ fieldNumber int // Actual field number, as defined in proto, e.g. 12
+ getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+ protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5"
+ deprecated string // Deprecation comment, if any.
+}
+
+// typedNil prints a nil cast to the pointer type of this field.
+// - for XXX_OneofWrappers
+func (f *oneofSubField) typedNil(g *Generator) {
+ g.P("(*", f.oneofTypeName, ")(nil),")
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5".
+func (f *oneofSubField) getProtoDef() string {
+ return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *oneofSubField) getProtoTypeName() string {
+ return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type {
+ return f.protoType
+}
+
+// oneofField represents the oneof on top level.
+// The alternative fields within the oneof are represented by oneofSubField.
+type oneofField struct {
+ fieldCommon
+ subFields []*oneofSubField // All the possible oneof fields
+ comment string // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *oneofField) decl(g *Generator, mc *msgCtx) {
+ comment := f.comment
+ for _, sf := range f.subFields {
+ comment += "//\t*" + sf.oneofTypeName + "\n"
+ }
+ g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`")
+}
+
+// getter for a oneof field will print additional discriminators and interfaces for the oneof,
+// also it prints all the getters for the sub fields.
+func (f *oneofField) getter(g *Generator, mc *msgCtx) {
+ // The discriminator type
+ g.P("type ", f.goType, " interface {")
+ g.P(f.goType, "()")
+ g.P("}")
+ g.P()
+ // The subField types, fulfilling the discriminator type contract
+ for _, sf := range f.subFields {
+ g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {")
+ g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`")
+ g.P("}")
+ g.P()
+ }
+ for _, sf := range f.subFields {
+ g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}")
+ g.P()
+ }
+ // Getter for the oneof field
+ g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {")
+ g.P("if m != nil { return m.", f.goName, " }")
+ g.P("return nil")
+ g.P("}")
+ g.P()
+ // Getters for each oneof
+ for _, sf := range f.subFields {
+ if sf.deprecated != "" {
+ g.P(sf.deprecated)
+ }
+ g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {")
+ g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {")
+ g.P("return x.", sf.goName)
+ g.P("}")
+ g.P("return ", sf.getterDef)
+ g.P("}")
+ g.P()
+ }
+}
+
+// setter prints the setter method of the field.
+func (f *oneofField) setter(g *Generator, mc *msgCtx) {
+ // No setters for oneof yet
+}
+
+// topLevelField interface implemented by all types of fields on the top level (not oneofSubField).
+type topLevelField interface {
+ decl(g *Generator, mc *msgCtx) // print declaration within the struct
+ getter(g *Generator, mc *msgCtx) // print getter
+ setter(g *Generator, mc *msgCtx) // print setter if applicable
+}
+
+// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField).
+type defField interface {
+ getProtoDef() string // default value explicitly stated in the proto file, e.g "yoshi" or "5"
+ getProtoName() string // proto name of a field, e.g. "field_name" or "descriptor"
+ getGoType() string // go type of the field as a string, e.g. "*int32"
+ getProtoTypeName() string // protobuf type name for the field, e.g. ".google.protobuf.Duration"
+ getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+}
+
+// generateDefaultConstants adds constants for default values if needed, which
+// is only if the default value is explicitly stated in the proto.
+func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) {
+ // Collect fields that can have defaults
+ dFields := []defField{}
+ for _, pf := range topLevelFields {
+ if f, ok := pf.(*oneofField); ok {
+ for _, osf := range f.subFields {
+ dFields = append(dFields, osf)
+ }
+ continue
+ }
+ dFields = append(dFields, pf.(defField))
+ }
+ for _, df := range dFields {
+ def := df.getProtoDef()
+ if def == "" {
+ continue
+ }
+ fieldname := g.defaultConstantName(mc.goName, df.getProtoName())
+ typename := df.getGoType()
+ if typename[0] == '*' {
+ typename = typename[1:]
+ }
+ kind := "const "
+ switch {
+ case typename == "bool":
+ case typename == "string":
+ def = strconv.Quote(def)
+ case typename == "[]byte":
+ def = "[]byte(" + strconv.Quote(unescape(def)) + ")"
+ kind = "var "
+ case def == "inf", def == "-inf", def == "nan":
+ // These names are known to, and defined by, the protocol language.
+ switch def {
+ case "inf":
+ def = "math.Inf(1)"
+ case "-inf":
+ def = "math.Inf(-1)"
+ case "nan":
+ def = "math.NaN()"
+ }
+ if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT {
+ def = "float32(" + def + ")"
+ }
+ kind = "var "
+ case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ if f, err := strconv.ParseFloat(def, 32); err == nil {
+ def = fmt.Sprint(float32(f))
+ }
+ case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ if f, err := strconv.ParseFloat(def, 64); err == nil {
+ def = fmt.Sprint(f)
+ }
+ case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM:
+ // Must be an enum. Need to construct the prefixed name.
+ obj := g.ObjectNamed(df.getProtoTypeName())
+ var enum *EnumDescriptor
+ if id, ok := obj.(*ImportedDescriptor); ok {
+ // The enum type has been publicly imported.
+ enum, _ = id.o.(*EnumDescriptor)
+ } else {
+ enum, _ = obj.(*EnumDescriptor)
+ }
+ if enum == nil {
+ log.Printf("don't know how to generate constant for %s", fieldname)
+ continue
+ }
+ def = g.DefaultPackageName(obj) + enum.prefix() + def
+ }
+ g.P(kind, fieldname, " ", typename, " = ", def)
+ g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""})
+ }
+ g.P()
+}
+
+// generateInternalStructFields just adds the XXX_ fields to the message struct.
+func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) {
+ g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals
+ if len(mc.message.ExtensionRange) > 0 {
+ messageset := ""
+ if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() {
+ messageset = "protobuf_messageset:\"1\" "
+ }
+ g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`")
+ }
+ g.P("XXX_unrecognized\t[]byte `json:\"-\"`")
+ g.P("XXX_sizecache\tint32 `json:\"-\"`")
+
+}
+
+// generateOneofFuncs adds all the utility functions for oneof, including marshaling, unmarshaling, and sizing.
+func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) {
+ ofields := []*oneofField{}
+ for _, f := range topLevelFields {
+ if o, ok := f.(*oneofField); ok {
+ ofields = append(ofields, o)
+ }
+ }
+ if len(ofields) == 0 {
+ return
+ }
+
+ // OneofFuncs
+ g.P("// XXX_OneofWrappers is for the internal use of the proto package.")
+ g.P("func (*", mc.goName, ") XXX_OneofWrappers() []interface{} {")
+ g.P("return []interface{}{")
+ for _, of := range ofields {
+ for _, sf := range of.subFields {
+ sf.typedNil(g)
+ }
+ }
+ g.P("}")
+ g.P("}")
+ g.P()
+}
+
+// generateMessageStruct adds the actual struct with its members (but not methods) to the output.
+func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) {
+ comments := g.PrintComments(mc.message.path)
+
+ // Guarantee deprecation comments appear after user-provided comments.
+ if mc.message.GetOptions().GetDeprecated() {
+ if comments {
+ // Convention: Separate deprecation comments from original
+ // comments with an empty line.
+ g.P("//")
+ }
+ g.P(deprecationComment)
+ }
+
+ g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {")
+ for _, pf := range topLevelFields {
+ pf.decl(g, mc)
+ }
+ g.generateInternalStructFields(mc, topLevelFields)
+ g.P("}")
+}
+
+// generateGetters adds getters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) {
+ for _, pf := range topLevelFields {
+ pf.getter(g, mc)
+ }
+}
+
+// generateSetters add setters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) {
+ for _, pf := range topLevelFields {
+ pf.setter(g, mc)
+ }
+}
+
+// generateCommonMethods adds methods to the message that are not on a per field basis.
+func (g *Generator) generateCommonMethods(mc *msgCtx) {
+ // Reset, String and ProtoMessage methods.
+ g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }")
+ g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }")
+ g.P("func (*", mc.goName, ") ProtoMessage() {}")
+ var indexes []string
+ for m := mc.message; m != nil; m = m.parent {
+ indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
+ }
+ g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {")
+ g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
+ g.P("}")
+ g.P()
+ // TODO: Revisit the decision to use a XXX_WellKnownType method
+ // if we change proto.MessageName to work with multiple equivalents.
+ if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] {
+ g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`)
+ g.P()
+ }
+
+ // Extension support methods
+ if len(mc.message.ExtensionRange) > 0 {
+ g.P()
+ g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{")
+ for _, r := range mc.message.ExtensionRange {
+ end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends
+ g.P("{Start: ", r.Start, ", End: ", end, "},")
+ }
+ g.P("}")
+ g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {")
+ g.P("return extRange_", mc.goName)
+ g.P("}")
+ g.P()
+ }
+
+ // TODO: It does not scale to keep adding another method for every
+ // operation on protos that we want to switch over to using the
+ // table-driven approach. Instead, we should only add a single method
+ // that allows getting access to the *InternalMessageInfo struct and then
+ // calling Unmarshal, Marshal, Merge, Size, and Discard directly on that.
+
+ // Wrapper for table-driven marshaling and unmarshaling.
+ g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {")
+ g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)")
+ g.P("}")
+
+ g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {")
+ g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)")
+ g.P("}")
+
+ g.P("func (m *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {")
+ g.P("xxx_messageInfo_", mc.goName, ".Merge(m, src)")
+ g.P("}")
+
+ g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message
+ g.P("return xxx_messageInfo_", mc.goName, ".Size(m)")
+ g.P("}")
+
+ g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {")
+ g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)")
+ g.P("}")
+
+ g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo")
+ g.P()
+}
+
+// Generate the type, methods and default constant definitions for this Descriptor.
+func (g *Generator) generateMessage(message *Descriptor) {
+ topLevelFields := []topLevelField{}
+ oFields := make(map[int32]*oneofField)
+ // The full type name
+ typeName := message.TypeName()
+ // The full type name, CamelCased.
+ goTypeName := CamelCaseSlice(typeName)
+
+ usedNames := make(map[string]bool)
+ for _, n := range methodNames {
+ usedNames[n] = true
+ }
+
+ // allocNames finds a conflict-free variation of the given strings,
+ // consistently mutating their suffixes.
+ // It returns the same number of strings.
+ allocNames := func(ns ...string) []string {
+ Loop:
+ for {
+ for _, n := range ns {
+ if usedNames[n] {
+ for i := range ns {
+ ns[i] += "_"
+ }
+ continue Loop
+ }
+ }
+ for _, n := range ns {
+ usedNames[n] = true
+ }
+ return ns
+ }
+ }
+
+ mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later
+
+ // Build a structure more suitable for generating the text in one pass
+ for i, field := range message.Field {
+ // Allocate the getter and the field at the same time so name
+ // collisions create field/method consistent names.
+ // TODO: This allocation occurs based on the order of the fields
+ // in the proto file, meaning that a change in the field
+ // ordering can change generated Method/Field names.
+ base := CamelCase(*field.Name)
+ ns := allocNames(base, "Get"+base)
+ fieldName, fieldGetterName := ns[0], ns[1]
+ typename, wiretype := g.GoType(message, field)
+ jsonName := *field.Name
+ tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty")
+
+ oneof := field.OneofIndex != nil
+ if oneof && oFields[*field.OneofIndex] == nil {
+ odp := message.OneofDecl[int(*field.OneofIndex)]
+ base := CamelCase(odp.GetName())
+ fname := allocNames(base)[0]
+
+ // This is the first field of a oneof we haven't seen before.
+ // Generate the union field.
+ oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)
+ c, ok := g.makeComments(oneofFullPath)
+ if ok {
+ c += "\n//\n"
+ }
+ c += "// Types that are valid to be assigned to " + fname + ":\n"
+ // Generate the rest of this comment later,
+ // when we've computed any disambiguation.
+
+ dname := "is" + goTypeName + "_" + fname
+ tag := `protobuf_oneof:"` + odp.GetName() + `"`
+ of := oneofField{
+ fieldCommon: fieldCommon{
+ goName: fname,
+ getterName: "Get"+fname,
+ goType: dname,
+ tags: tag,
+ protoName: odp.GetName(),
+ fullPath: oneofFullPath,
+ },
+ comment: c,
+ }
+ topLevelFields = append(topLevelFields, &of)
+ oFields[*field.OneofIndex] = &of
+ }
+
+ if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
+ desc := g.ObjectNamed(field.GetTypeName())
+ if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() {
+ // Figure out the Go types and tags for the key and value types.
+ keyField, valField := d.Field[0], d.Field[1]
+ keyType, keyWire := g.GoType(d, keyField)
+ valType, valWire := g.GoType(d, valField)
+ keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire)
+
+ // We don't use stars, except for message-typed values.
+ // Message and enum types are the only two possibly foreign types used in maps,
+ // so record their use. They are not permitted as map keys.
+ keyType = strings.TrimPrefix(keyType, "*")
+ switch *valField.Type {
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ valType = strings.TrimPrefix(valType, "*")
+ g.RecordTypeUse(valField.GetTypeName())
+ case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ g.RecordTypeUse(valField.GetTypeName())
+ default:
+ valType = strings.TrimPrefix(valType, "*")
+ }
+
+ typename = fmt.Sprintf("map[%s]%s", keyType, valType)
+ mapFieldTypes[field] = typename // record for the getter generation
+
+ tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag)
+ }
+ }
+
+ fieldDeprecated := ""
+ if field.GetOptions().GetDeprecated() {
+ fieldDeprecated = deprecationComment
+ }
+
+ dvalue := g.getterDefault(field, goTypeName)
+ if oneof {
+ tname := goTypeName + "_" + fieldName
+ // It is possible for this to collide with a message or enum
+ // nested in this message. Check for collisions.
+ for {
+ ok := true
+ for _, desc := range message.nested {
+ if CamelCaseSlice(desc.TypeName()) == tname {
+ ok = false
+ break
+ }
+ }
+ for _, enum := range message.enums {
+ if CamelCaseSlice(enum.TypeName()) == tname {
+ ok = false
+ break
+ }
+ }
+ if !ok {
+ tname += "_"
+ continue
+ }
+ break
+ }
+
+ oneofField := oFields[*field.OneofIndex]
+ tag := "protobuf:" + g.goTag(message, field, wiretype)
+ sf := oneofSubField{
+ fieldCommon: fieldCommon{
+ goName: fieldName,
+ getterName: fieldGetterName,
+ goType: typename,
+ tags: tag,
+ protoName: field.GetName(),
+ fullPath: fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i),
+ },
+ protoTypeName: field.GetTypeName(),
+ fieldNumber: int(*field.Number),
+ protoType: *field.Type,
+ getterDef: dvalue,
+ protoDef: field.GetDefaultValue(),
+ oneofTypeName: tname,
+ deprecated: fieldDeprecated,
+ }
+ oneofField.subFields = append(oneofField.subFields, &sf)
+ g.RecordTypeUse(field.GetTypeName())
+ continue
+ }
+
+ fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)
+ c, ok := g.makeComments(fieldFullPath)
+ if ok {
+ c += "\n"
+ }
+ rf := simpleField{
+ fieldCommon: fieldCommon{
+ goName: fieldName,
+ getterName: fieldGetterName,
+ goType: typename,
+ tags: tag,
+ protoName: field.GetName(),
+ fullPath: fieldFullPath,
+ },
+ protoTypeName: field.GetTypeName(),
+ protoType: *field.Type,
+ deprecated: fieldDeprecated,
+ getterDef: dvalue,
+ protoDef: field.GetDefaultValue(),
+ comment: c,
+ }
+ var pf topLevelField = &rf
+
+ topLevelFields = append(topLevelFields, pf)
+ g.RecordTypeUse(field.GetTypeName())
+ }
+
+ mc := &msgCtx{
+ goName: goTypeName,
+ message: message,
+ }
+
+ g.generateMessageStruct(mc, topLevelFields)
+ g.P()
+ g.generateCommonMethods(mc)
+ g.P()
+ g.generateDefaultConstants(mc, topLevelFields)
+ g.P()
+ g.generateGetters(mc, topLevelFields)
+ g.P()
+ g.generateSetters(mc, topLevelFields)
+ g.P()
+ g.generateOneofFuncs(mc, topLevelFields)
+ g.P()
+
+ var oneofTypes []string
+ for _, f := range topLevelFields {
+ if of, ok := f.(*oneofField); ok {
+ for _, osf := range of.subFields {
+ oneofTypes = append(oneofTypes, osf.oneofTypeName)
+ }
+ }
+ }
+
+ opts := message.Options
+ ms := &messageSymbol{
+ sym: goTypeName,
+ hasExtensions: len(message.ExtensionRange) > 0,
+ isMessageSet: opts != nil && opts.GetMessageSetWireFormat(),
+ oneofTypes: oneofTypes,
+ }
+ g.file.addExport(message, ms)
+
+ for _, ext := range message.ext {
+ g.generateExtension(ext)
+ }
+
+ fullName := strings.Join(message.TypeName(), ".")
+ if g.file.Package != nil {
+ fullName = *g.file.Package + "." + fullName
+ }
+
+ g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName)
+ // Register types for native map types.
+ for _, k := range mapFieldKeys(mapFieldTypes) {
+ fullName := strings.TrimPrefix(*k.TypeName, ".")
+ g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName)
+ }
+
+}
+
+type byTypeName []*descriptor.FieldDescriptorProto
+
+func (a byTypeName) Len() int { return len(a) }
+func (a byTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName }
+
+// mapFieldKeys returns the keys of m in a consistent order.
+func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto {
+ keys := make([]*descriptor.FieldDescriptorProto, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Sort(byTypeName(keys))
+ return keys
+}
+
+var escapeChars = [256]byte{
+ 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?',
+}
+
+// unescape reverses the "C" escaping that protoc does for default values of bytes fields.
+// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape
+// sequences are conveyed, unmodified, into the decoded result.
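+//
+// For example (editorial illustration), the input `\x41\101\n` decodes to
+// "AA\n": a hex escape, an octal escape, and a standard C escape in turn.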
+func unescape(s string) string {
+ // NB: Sadly, we can't use strconv.Unquote because protoc will escape both
+ // single and double quotes, but strconv.Unquote only allows one or the
+ // other (based on actual surrounding quotes of its input argument).
+
+ var out []byte
+ for len(s) > 0 {
+ // regular character, or too short to be valid escape
+ if s[0] != '\\' || len(s) < 2 {
+ out = append(out, s[0])
+ s = s[1:]
+ } else if c := escapeChars[s[1]]; c != 0 {
+ // escape sequence
+ out = append(out, c)
+ s = s[2:]
+ } else if s[1] == 'x' || s[1] == 'X' {
+			// hex escape, e.g. "\x80"
+ if len(s) < 4 {
+ // too short to be valid
+ out = append(out, s[:2]...)
+ s = s[2:]
+ continue
+ }
+ v, err := strconv.ParseUint(s[2:4], 16, 8)
+ if err != nil {
+ out = append(out, s[:4]...)
+ } else {
+ out = append(out, byte(v))
+ }
+ s = s[4:]
+ } else if '0' <= s[1] && s[1] <= '7' {
+			// octal escape, which can vary from 1 to 3 octal digits, e.g. "\0", "\40", or "\164";
+ // so consume up to 2 more bytes or up to end-of-string
+ n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567"))
+ if n > 3 {
+ n = 3
+ }
+ v, err := strconv.ParseUint(s[1:1+n], 8, 8)
+ if err != nil {
+ out = append(out, s[:1+n]...)
+ } else {
+ out = append(out, byte(v))
+ }
+ s = s[1+n:]
+ } else {
+ // bad escape, just propagate the slash as-is
+ out = append(out, s[0])
+ s = s[1:]
+ }
+ }
+
+ return string(out)
+}
+
+func (g *Generator) generateExtension(ext *ExtensionDescriptor) {
+ ccTypeName := ext.DescName()
+
+ extObj := g.ObjectNamed(*ext.Extendee)
+ var extDesc *Descriptor
+ if id, ok := extObj.(*ImportedDescriptor); ok {
+ // This is extending a publicly imported message.
+ // We need the underlying type for goTag.
+ extDesc = id.o.(*Descriptor)
+ } else {
+ extDesc = extObj.(*Descriptor)
+ }
+ extendedType := "*" + g.TypeName(extObj) // always use the original
+ field := ext.FieldDescriptorProto
+ fieldType, wireType := g.GoType(ext.parent, field)
+ tag := g.goTag(extDesc, field, wireType)
+ g.RecordTypeUse(*ext.Extendee)
+ if n := ext.FieldDescriptorProto.TypeName; n != nil {
+ // foreign extension type
+ g.RecordTypeUse(*n)
+ }
+
+ typeName := ext.TypeName()
+
+ // Special case for proto2 message sets: If this extension is extending
+ // proto2.bridge.MessageSet, and its final name component is "message_set_extension",
+ // then drop that last component.
+ //
+ // TODO: This should be implemented in the text formatter rather than the generator.
+ // In addition, the situation for when to apply this special case is implemented
+ // differently in other languages:
+ // https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560
+ if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" {
+ typeName = typeName[:len(typeName)-1]
+ }
+
+ // For text formatting, the package must be exactly what the .proto file declares,
+ // ignoring overrides such as the go_package option, and with no dot/underscore mapping.
+ extName := strings.Join(typeName, ".")
+ if g.file.Package != nil {
+ extName = *g.file.Package + "." + extName
+ }
+
+ g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{")
+ g.P("ExtendedType: (", extendedType, ")(nil),")
+ g.P("ExtensionType: (", fieldType, ")(nil),")
+ g.P("Field: ", field.Number, ",")
+ g.P(`Name: "`, extName, `",`)
+ g.P("Tag: ", tag, ",")
+ g.P(`Filename: "`, g.file.GetName(), `",`)
+
+ g.P("}")
+ g.P()
+
+ g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName())
+
+ g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""})
+}
+
+func (g *Generator) generateInitFunction() {
+ if len(g.init) == 0 {
+ return
+ }
+ g.P("func init() {")
+ for _, l := range g.init {
+ g.P(l)
+ }
+ g.P("}")
+ g.init = nil
+}
+
+func (g *Generator) generateFileDescriptor(file *FileDescriptor) {
+ // Make a copy and trim source_code_info data.
+ // TODO: Trim this more when we know exactly what we need.
+ pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto)
+ pb.SourceCodeInfo = nil
+
+ b, err := proto.Marshal(pb)
+ if err != nil {
+ g.Fail(err.Error())
+ }
+
+ var buf bytes.Buffer
+ w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
+ w.Write(b)
+ w.Close()
+ b = buf.Bytes()
+
+ v := file.VarName()
+ g.P()
+ g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }")
+ g.P("var ", v, " = []byte{")
+ g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto")
+ for len(b) > 0 {
+ n := 16
+ if n > len(b) {
+ n = len(b)
+ }
+
+ s := ""
+ for _, c := range b[:n] {
+ s += fmt.Sprintf("0x%02x,", c)
+ }
+ g.P(s)
+
+ b = b[n:]
+ }
+ g.P("}")
+}
+
+func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) {
+	// We always print the full (proto-world) package name here.
+ pkg := enum.File().GetPackage()
+ if pkg != "" {
+ pkg += "."
+ }
+ // The full type name
+ typeName := enum.TypeName()
+ // The full type name, CamelCased.
+ ccTypeName := CamelCaseSlice(typeName)
+ g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName)
+}
+
+// And now lots of helper functions.
+
+// Is c an ASCII lower-case letter?
+func isASCIILower(c byte) bool {
+ return 'a' <= c && c <= 'z'
+}
+
+// Is c an ASCII digit?
+func isASCIIDigit(c byte) bool {
+ return '0' <= c && c <= '9'
+}
+
+// CamelCase returns the CamelCased name.
+// If there is an interior underscore followed by a lower case letter,
+// drop the underscore and convert the letter to upper case.
+// There is a remote possibility of this rewrite causing a name collision,
+// but it's so remote we're prepared to pretend it's nonexistent - since the
+// C++ generator lowercases names, it's extremely unlikely to have two fields
+// with different capitalizations.
+// In short, _my_field_name_2 becomes XMyFieldName_2.
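+// Likewise (editorial note), "my_field_name" becomes "MyFieldName" and
+// "str2_b" becomes "Str2B".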
+func CamelCase(s string) string {
+ if s == "" {
+ return ""
+ }
+ t := make([]byte, 0, 32)
+ i := 0
+ if s[0] == '_' {
+ // Need a capital letter; drop the '_'.
+ t = append(t, 'X')
+ i++
+ }
+ // Invariant: if the next letter is lower case, it must be converted
+ // to upper case.
+ // That is, we process a word at a time, where words are marked by _ or
+ // upper case letter. Digits are treated as words.
+ for ; i < len(s); i++ {
+ c := s[i]
+ if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) {
+ continue // Skip the underscore in s.
+ }
+ if isASCIIDigit(c) {
+ t = append(t, c)
+ continue
+ }
+ // Assume we have a letter now - if not, it's a bogus identifier.
+ // The next word is a sequence of characters that must start upper case.
+ if isASCIILower(c) {
+ c ^= ' ' // Make it a capital letter.
+ }
+ t = append(t, c) // Guaranteed not lower case.
+ // Accept lower case sequence that follows.
+ for i+1 < len(s) && isASCIILower(s[i+1]) {
+ i++
+ t = append(t, s[i])
+ }
+ }
+ return string(t)
+}
+
+// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to
+// be joined with "_".
+func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) }
+
+// dottedSlice turns a sliced name into a dotted name.
+func dottedSlice(elem []string) string { return strings.Join(elem, ".") }
+
+// Is this field optional?
+func isOptional(field *descriptor.FieldDescriptorProto) bool {
+ return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+// Is this field required?
+func isRequired(field *descriptor.FieldDescriptorProto) bool {
+ return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED
+}
+
+// Is this field repeated?
+func isRepeated(field *descriptor.FieldDescriptorProto) bool {
+ return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
+}
+
+// Is this field a scalar numeric type?
+func isScalar(field *descriptor.FieldDescriptorProto) bool {
+ if field.Type == nil {
+ return false
+ }
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
+ descriptor.FieldDescriptorProto_TYPE_FLOAT,
+ descriptor.FieldDescriptorProto_TYPE_INT64,
+ descriptor.FieldDescriptorProto_TYPE_UINT64,
+ descriptor.FieldDescriptorProto_TYPE_INT32,
+ descriptor.FieldDescriptorProto_TYPE_FIXED64,
+ descriptor.FieldDescriptorProto_TYPE_FIXED32,
+ descriptor.FieldDescriptorProto_TYPE_BOOL,
+ descriptor.FieldDescriptorProto_TYPE_UINT32,
+ descriptor.FieldDescriptorProto_TYPE_ENUM,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+ descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+ descriptor.FieldDescriptorProto_TYPE_SINT32,
+ descriptor.FieldDescriptorProto_TYPE_SINT64:
+ return true
+ default:
+ return false
+ }
+}
+
+// badToUnderscore is the mapping function used to generate Go names from package names,
+// which can be dotted in the input .proto file. It replaces non-identifier characters such as
+// dot or dash with underscore.
+func badToUnderscore(r rune) rune {
+ if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
+ return r
+ }
+ return '_'
+}
+
+// baseName returns the last path element of the name, with the last dotted suffix removed.
+func baseName(name string) string {
+ // First, find the last element
+ if i := strings.LastIndex(name, "/"); i >= 0 {
+ name = name[i+1:]
+ }
+ // Now drop the suffix
+ if i := strings.LastIndex(name, "."); i >= 0 {
+ name = name[0:i]
+ }
+ return name
+}
+
+// The SourceCodeInfo message describes the location of elements of a parsed
+// .proto file by way of a "path", which is a sequence of integers that
+// describe the route from a FileDescriptorProto to the relevant submessage.
+// The path alternates between a field number of a repeated field, and an index
+// into that repeated field. The constants below define the field numbers that
+// are used.
+//
+// See descriptor.proto for more information about this.
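+//
+// For example (editorial note), the path "4,0,2,1" addresses
+// message_type[0].field[1], i.e. the second field of the first top-level
+// message in the file.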
+const (
+ // tag numbers in FileDescriptorProto
+ packagePath = 2 // package
+ messagePath = 4 // message_type
+ enumPath = 5 // enum_type
+ // tag numbers in DescriptorProto
+ messageFieldPath = 2 // field
+ messageMessagePath = 3 // nested_type
+ messageEnumPath = 4 // enum_type
+ messageOneofPath = 8 // oneof_decl
+ // tag numbers in EnumDescriptorProto
+ enumValuePath = 2 // value
+)
+
+var supportTypeAliases bool
+
+func init() {
+ for _, tag := range build.Default.ReleaseTags {
+ if tag == "go1.9" {
+ supportTypeAliases = true
+ return
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go
new file mode 100644
index 000000000000..a9b61036cc0f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go
@@ -0,0 +1,117 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package remap handles tracking the locations of Go tokens in a source text
+across a rewrite by the Go formatter.
+*/
+package remap
+
+import (
+ "fmt"
+ "go/scanner"
+ "go/token"
+)
+
+// A Location represents a span of byte offsets in the source text.
+type Location struct {
+ Pos, End int // End is exclusive
+}
+
+// A Map represents a mapping between token locations in an input source text
+// and locations in the corresponding output text.
+type Map map[Location]Location
+
+// Find reports whether the specified span is recorded by m, and if so returns
+// the new location it was mapped to. If the input span was not found, the
+// returned location is the same as the input.
+func (m Map) Find(pos, end int) (Location, bool) {
+ key := Location{
+ Pos: pos,
+ End: end,
+ }
+ if loc, ok := m[key]; ok {
+ return loc, true
+ }
+ return key, false
+}
+
+func (m Map) add(opos, oend, npos, nend int) {
+ m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
+}
+
+// Compute constructs a location mapping from input to output. An error is
+// reported if any of the tokens of output cannot be mapped.
+func Compute(input, output []byte) (Map, error) {
+ itok := tokenize(input)
+ otok := tokenize(output)
+ if len(itok) != len(otok) {
+ return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
+ }
+ m := make(Map)
+ for i, ti := range itok {
+ to := otok[i]
+ if ti.Token != to.Token {
+ return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
+ }
+ m.add(ti.pos, ti.end, to.pos, to.end)
+ }
+ return m, nil
+}
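+
+// A usage sketch (editorial note; "original", "formatted", "pos" and "end"
+// are illustrative names): given the bytes of a file before and after
+// reformatting,
+//
+//	m, err := Compute(original, formatted)
+//	if err == nil {
+//		if loc, ok := m.Find(pos, end); ok {
+//			// loc.Pos and loc.End are the equivalent
+//			// offsets in the formatted output.
+//		}
+//	}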
+
+// tokinfo records the span and type of a source token.
+type tokinfo struct {
+ pos, end int
+ token.Token
+}
+
+func tokenize(src []byte) []tokinfo {
+ fs := token.NewFileSet()
+ var s scanner.Scanner
+ s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments)
+ var info []tokinfo
+ for {
+ pos, next, lit := s.Scan()
+ switch next {
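+		// Note (editorial): the scanner reports automatically inserted
+		// semicolons, which reformatting may add or remove, so they are
+		// skipped to let input and output tokens match one-to-one.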
+ case token.SEMICOLON:
+ continue
+ }
+ info = append(info, tokinfo{
+ pos: int(pos - 1),
+ end: int(pos + token.Pos(len(lit)) - 1),
+ Token: next,
+ })
+ if next == token.EOF {
+ break
+ }
+ }
+ return info
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
new file mode 100644
index 000000000000..61bfc10e02e7
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
@@ -0,0 +1,369 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/compiler/plugin.proto
+
+/*
+Package plugin_go is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/compiler/plugin.proto
+
+It has these top-level messages:
+ Version
+ CodeGeneratorRequest
+ CodeGeneratorResponse
+*/
+package plugin_go
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The version number of protocol compiler.
+type Version struct {
+ Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
+ Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
+ Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
+ // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+ // be empty for mainline stable releases.
+ Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Version) Reset() { *m = Version{} }
+func (m *Version) String() string { return proto.CompactTextString(m) }
+func (*Version) ProtoMessage() {}
+func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (m *Version) Unmarshal(b []byte) error {
+ return xxx_messageInfo_Version.Unmarshal(m, b)
+}
+func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Version.Marshal(b, m, deterministic)
+}
+func (dst *Version) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Version.Merge(dst, src)
+}
+func (m *Version) XXX_Size() int {
+ return xxx_messageInfo_Version.Size(m)
+}
+func (m *Version) XXX_DiscardUnknown() {
+ xxx_messageInfo_Version.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Version proto.InternalMessageInfo
+
+func (m *Version) GetMajor() int32 {
+ if m != nil && m.Major != nil {
+ return *m.Major
+ }
+ return 0
+}
+
+func (m *Version) GetMinor() int32 {
+ if m != nil && m.Minor != nil {
+ return *m.Minor
+ }
+ return 0
+}
+
+func (m *Version) GetPatch() int32 {
+ if m != nil && m.Patch != nil {
+ return *m.Patch
+ }
+ return 0
+}
+
+func (m *Version) GetSuffix() string {
+ if m != nil && m.Suffix != nil {
+ return *m.Suffix
+ }
+ return ""
+}
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+type CodeGeneratorRequest struct {
+ // The .proto files that were explicitly listed on the command-line. The
+ // code generator should generate code only for these files. Each file's
+ // descriptor will be included in proto_file, below.
+ FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
+ // The generator parameter passed on the command-line.
+ Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
+ // FileDescriptorProtos for all files in files_to_generate and everything
+ // they import. The files will appear in topological order, so each file
+ // appears before any file that imports it.
+ //
+ // protoc guarantees that all proto_files will be written after
+ // the fields above, even though this is not technically guaranteed by the
+ // protobuf wire format. This theoretically could allow a plugin to stream
+ // in the FileDescriptorProtos and handle them one by one rather than read
+ // the entire set into memory at once. However, as of this writing, this
+ // is not similarly optimized on protoc's end -- it will store all fields in
+ // memory at once before sending them to the plugin.
+ //
+ // Type names of fields and extensions in the FileDescriptorProto are always
+ // fully qualified.
+ ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
+ // The version number of protocol compiler.
+ CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} }
+func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) }
+func (*CodeGeneratorRequest) ProtoMessage() {}
+func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (m *CodeGeneratorRequest) Unmarshal(b []byte) error {
+ return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b)
+}
+func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic)
+}
+func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src)
+}
+func (m *CodeGeneratorRequest) XXX_Size() int {
+ return xxx_messageInfo_CodeGeneratorRequest.Size(m)
+}
+func (m *CodeGeneratorRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo
+
+func (m *CodeGeneratorRequest) GetFileToGenerate() []string {
+ if m != nil {
+ return m.FileToGenerate
+ }
+ return nil
+}
+
+func (m *CodeGeneratorRequest) GetParameter() string {
+ if m != nil && m.Parameter != nil {
+ return *m.Parameter
+ }
+ return ""
+}
+
+func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto {
+ if m != nil {
+ return m.ProtoFile
+ }
+ return nil
+}
+
+func (m *CodeGeneratorRequest) GetCompilerVersion() *Version {
+ if m != nil {
+ return m.CompilerVersion
+ }
+ return nil
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+type CodeGeneratorResponse struct {
+ // Error message. If non-empty, code generation failed. The plugin process
+ // should exit with status code zero even if it reports an error in this way.
+ //
+ // This should be used to indicate errors in .proto files which prevent the
+ // code generator from generating correct code. Errors which indicate a
+ // problem in protoc itself -- such as the input CodeGeneratorRequest being
+ // unparseable -- should be reported by writing a message to stderr and
+ // exiting with a non-zero status code.
+ Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+ File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} }
+func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) }
+func (*CodeGeneratorResponse) ProtoMessage() {}
+func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (m *CodeGeneratorResponse) Unmarshal(b []byte) error {
+ return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b)
+}
+func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic)
+}
+func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src)
+}
+func (m *CodeGeneratorResponse) XXX_Size() int {
+ return xxx_messageInfo_CodeGeneratorResponse.Size(m)
+}
+func (m *CodeGeneratorResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo
+
+func (m *CodeGeneratorResponse) GetError() string {
+ if m != nil && m.Error != nil {
+ return *m.Error
+ }
+ return ""
+}
+
+func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
+ if m != nil {
+ return m.File
+ }
+ return nil
+}
+
+// Represents a single generated file.
+type CodeGeneratorResponse_File struct {
+ // The file name, relative to the output directory. The name must not
+ // contain "." or ".." components and must be relative, not be absolute (so,
+ // the file cannot lie outside the output directory). "/" must be used as
+ // the path separator, not "\".
+ //
+ // If the name is omitted, the content will be appended to the previous
+ // file. This allows the generator to break large files into small chunks,
+ // and allows the generated text to be streamed back to protoc so that large
+ // files need not reside completely in memory at one time. Note that as of
+ // this writing protoc does not optimize for this -- it will read the entire
+ // CodeGeneratorResponse before writing files to disk.
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // If non-empty, indicates that the named file should already exist, and the
+ // content here is to be inserted into that file at a defined insertion
+ // point. This feature allows a code generator to extend the output
+ // produced by another code generator. The original generator may provide
+ // insertion points by placing special annotations in the file that look
+ // like:
+ // @@protoc_insertion_point(NAME)
+ // The annotation can have arbitrary text before and after it on the line,
+ // which allows it to be placed in a comment. NAME should be replaced with
+ // an identifier naming the point -- this is what other generators will use
+ // as the insertion_point. Code inserted at this point will be placed
+ // immediately above the line containing the insertion point (thus multiple
+ // insertions to the same point will come out in the order they were added).
+ // The double-@ is intended to make it unlikely that the generated code
+ // could contain things that look like insertion points by accident.
+ //
+ // For example, the C++ code generator places the following line in the
+ // .pb.h files that it generates:
+ // // @@protoc_insertion_point(namespace_scope)
+ // This line appears within the scope of the file's package namespace, but
+ // outside of any particular class. Another plugin can then specify the
+ // insertion_point "namespace_scope" to generate additional classes or
+ // other declarations that should be placed in this scope.
+ //
+ // Note that if the line containing the insertion point begins with
+ // whitespace, the same whitespace will be added to every line of the
+ // inserted text. This is useful for languages like Python, where
+ // indentation matters. In these languages, the insertion point comment
+ // should be indented the same amount as any inserted code will need to be
+ // in order to work correctly in that context.
+ //
+ // The code generator that generates the initial file and the one which
+ // inserts into it must both run as part of a single invocation of protoc.
+ // Code generators are executed in the order in which they appear on the
+ // command line.
+ //
+ // If |insertion_point| is present, |name| must also be present.
+ InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
+ // The file contents.
+ Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} }
+func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) }
+func (*CodeGeneratorResponse_File) ProtoMessage() {}
+func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error {
+ return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b)
+}
+func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic)
+}
+func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src)
+}
+func (m *CodeGeneratorResponse_File) XXX_Size() int {
+ return xxx_messageInfo_CodeGeneratorResponse_File.Size(m)
+}
+func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() {
+ xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo
+
+func (m *CodeGeneratorResponse_File) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *CodeGeneratorResponse_File) GetInsertionPoint() string {
+ if m != nil && m.InsertionPoint != nil {
+ return *m.InsertionPoint
+ }
+ return ""
+}
+
+func (m *CodeGeneratorResponse_File) GetContent() string {
+ if m != nil && m.Content != nil {
+ return *m.Content
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version")
+ proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest")
+ proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse")
+ proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File")
+}
+
+func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 417 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41,
+ 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2,
+ 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30,
+ 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa,
+ 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91,
+ 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63,
+ 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb,
+ 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55,
+ 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8,
+ 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1,
+ 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f,
+ 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d,
+ 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2,
+ 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a,
+ 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2,
+ 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d,
+ 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda,
+ 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed,
+ 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34,
+ 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79,
+ 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45,
+ 0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4,
+ 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e,
+ 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92,
+ 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d,
+ 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00,
+ 0x00,
+}
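
Taken together, the types above define the whole plugin contract: protoc hands the plugin an encoded CodeGeneratorRequest on stdin and expects an encoded CodeGeneratorResponse on stdout. A minimal, hypothetical plugin built on this package might look like the sketch below (the `protoc-gen-demo` name and the `.demo.txt` output naming are illustrative assumptions, not part of this package):

```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	// protoc writes an encoded CodeGeneratorRequest to the plugin's stdin.
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	req := &plugin.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		log.Fatal(err)
	}

	// Emit one trivial file per explicitly requested .proto; a real
	// generator would walk req.ProtoFile for the full descriptors.
	resp := &plugin.CodeGeneratorResponse{}
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".demo.txt"),
			Content: proto.String("generated from " + name + "\n"),
		})
	}

	// The plugin writes an encoded CodeGeneratorResponse to stdout.
	out, err := proto.Marshal(resp)
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(out)
}
```

Installed on the PATH as `protoc-gen-demo`, it would be invoked via `protoc --demo_out=. foo.proto`.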
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
new file mode 100644
index 000000000000..8953d0ff827e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
@@ -0,0 +1,83 @@
+// Code generated by protoc-gen-go.
+// source: google/protobuf/compiler/plugin.proto
+// DO NOT EDIT!
+
+package google_protobuf_compiler
+
+import proto "github.com/golang/protobuf/proto"
+import "math"
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference proto and math imports to suppress error if they are not otherwise used.
+var _ = proto.GetString
+var _ = math.Inf
+
+type CodeGeneratorRequest struct {
+ FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"`
+ Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
+ ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} }
+func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorRequest) ProtoMessage() {}
+
+func (this *CodeGeneratorRequest) GetParameter() string {
+ if this != nil && this.Parameter != nil {
+ return *this.Parameter
+ }
+ return ""
+}
+
+type CodeGeneratorResponse struct {
+ Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+ File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} }
+func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorResponse) ProtoMessage() {}
+
+func (this *CodeGeneratorResponse) GetError() string {
+ if this != nil && this.Error != nil {
+ return *this.Error
+ }
+ return ""
+}
+
+type CodeGeneratorResponse_File struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"`
+ Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} }
+func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorResponse_File) ProtoMessage() {}
+
+func (this *CodeGeneratorResponse_File) GetName() string {
+ if this != nil && this.Name != nil {
+ return *this.Name
+ }
+ return ""
+}
+
+func (this *CodeGeneratorResponse_File) GetInsertionPoint() string {
+ if this != nil && this.InsertionPoint != nil {
+ return *this.InsertionPoint
+ }
+ return ""
+}
+
+func (this *CodeGeneratorResponse_File) GetContent() string {
+ if this != nil && this.Content != nil {
+ return *this.Content
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
new file mode 100644
index 000000000000..5b5574529ed4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
@@ -0,0 +1,167 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//
+// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to
+// change.
+//
+// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
+// just a program that reads a CodeGeneratorRequest from stdin and writes a
+// CodeGeneratorResponse to stdout.
+//
+// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
+// of dealing with the raw protocol defined here.
+//
+// A plugin executable needs only to be placed somewhere in the path. The
+// plugin should be named "protoc-gen-$NAME", and will then be used when the
+// flag "--${NAME}_out" is passed to protoc.
+
+syntax = "proto2";
+package google.protobuf.compiler;
+option java_package = "com.google.protobuf.compiler";
+option java_outer_classname = "PluginProtos";
+
+option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";
+
+import "google/protobuf/descriptor.proto";
+
+// The version number of protocol compiler.
+message Version {
+ optional int32 major = 1;
+ optional int32 minor = 2;
+ optional int32 patch = 3;
+ // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+ // be empty for mainline stable releases.
+ optional string suffix = 4;
+}
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+message CodeGeneratorRequest {
+ // The .proto files that were explicitly listed on the command-line. The
+ // code generator should generate code only for these files. Each file's
+ // descriptor will be included in proto_file, below.
+ repeated string file_to_generate = 1;
+
+ // The generator parameter passed on the command-line.
+ optional string parameter = 2;
+
+ // FileDescriptorProtos for all files in files_to_generate and everything
+ // they import. The files will appear in topological order, so each file
+ // appears before any file that imports it.
+ //
+ // protoc guarantees that all proto_files will be written after
+ // the fields above, even though this is not technically guaranteed by the
+ // protobuf wire format. This theoretically could allow a plugin to stream
+ // in the FileDescriptorProtos and handle them one by one rather than read
+ // the entire set into memory at once. However, as of this writing, this
+ // is not similarly optimized on protoc's end -- it will store all fields in
+ // memory at once before sending them to the plugin.
+ //
+ // Type names of fields and extensions in the FileDescriptorProto are always
+ // fully qualified.
+ repeated FileDescriptorProto proto_file = 15;
+
+ // The version number of protocol compiler.
+ optional Version compiler_version = 3;
+
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+message CodeGeneratorResponse {
+ // Error message. If non-empty, code generation failed. The plugin process
+ // should exit with status code zero even if it reports an error in this way.
+ //
+ // This should be used to indicate errors in .proto files which prevent the
+ // code generator from generating correct code. Errors which indicate a
+ // problem in protoc itself -- such as the input CodeGeneratorRequest being
+ // unparseable -- should be reported by writing a message to stderr and
+ // exiting with a non-zero status code.
+ optional string error = 1;
+
+ // Represents a single generated file.
+ message File {
+ // The file name, relative to the output directory. The name must not
+ // contain "." or ".." components and must be relative, not be absolute (so,
+ // the file cannot lie outside the output directory). "/" must be used as
+ // the path separator, not "\".
+ //
+ // If the name is omitted, the content will be appended to the previous
+ // file. This allows the generator to break large files into small chunks,
+ // and allows the generated text to be streamed back to protoc so that large
+ // files need not reside completely in memory at one time. Note that as of
+ // this writing protoc does not optimize for this -- it will read the entire
+ // CodeGeneratorResponse before writing files to disk.
+ optional string name = 1;
+
+ // If non-empty, indicates that the named file should already exist, and the
+ // content here is to be inserted into that file at a defined insertion
+ // point. This feature allows a code generator to extend the output
+ // produced by another code generator. The original generator may provide
+ // insertion points by placing special annotations in the file that look
+ // like:
+ // @@protoc_insertion_point(NAME)
+ // The annotation can have arbitrary text before and after it on the line,
+ // which allows it to be placed in a comment. NAME should be replaced with
+ // an identifier naming the point -- this is what other generators will use
+ // as the insertion_point. Code inserted at this point will be placed
+ // immediately above the line containing the insertion point (thus multiple
+ // insertions to the same point will come out in the order they were added).
+ // The double-@ is intended to make it unlikely that the generated code
+ // could contain things that look like insertion points by accident.
+ //
+ // For example, the C++ code generator places the following line in the
+ // .pb.h files that it generates:
+ // // @@protoc_insertion_point(namespace_scope)
+ // This line appears within the scope of the file's package namespace, but
+ // outside of any particular class. Another plugin can then specify the
+ // insertion_point "namespace_scope" to generate additional classes or
+ // other declarations that should be placed in this scope.
+ //
+ // Note that if the line containing the insertion point begins with
+ // whitespace, the same whitespace will be added to every line of the
+ // inserted text. This is useful for languages like Python, where
+ // indentation matters. In these languages, the insertion point comment
+ // should be indented the same amount as any inserted code will need to be
+ // in order to work correctly in that context.
+ //
+ // The code generator that generates the initial file and the one which
+ // inserts into it must both run as part of a single invocation of protoc.
+ // Code generators are executed in the order in which they appear on the
+ // command line.
+ //
+ // If |insertion_point| is present, |name| must also be present.
+ optional string insertion_point = 2;
+
+ // The file contents.
+ optional string content = 15;
+ }
+ repeated File file = 15;
+}
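
To make the insertion_point mechanism described above concrete, here is a hedged fragment (file name, point name, and content are illustrative; it assumes the `plugin` and `proto` packages are imported as in the earlier sketch) of the response entry a second generator would emit to splice code into a file produced earlier in the same protoc invocation:

```go
// Hypothetical fragment of a second plugin's CodeGeneratorResponse:
// protoc inserts Content immediately above the line containing
// @@protoc_insertion_point(namespace_scope) in the named file.
f := &plugin.CodeGeneratorResponse_File{
	Name:           proto.String("example.pb.h"),      // must name a file already generated
	InsertionPoint: proto.String("namespace_scope"),   // identifier inside the annotation
	Content:        proto.String("class Extra {};\n"), // spliced, preserving leading whitespace
}
```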
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore
new file mode 100644
index 000000000000..c198e6a4c7c5
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.gitignore
@@ -0,0 +1,202 @@
+# Created by .ignore support plugin (hsz.mobi)
+### Go template
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+### Windows template
+# Windows image file caches
+Thumbs.db
+ehthumbs.db
+
+# Folder config file
+Desktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+### Kate template
+# Swap Files #
+.*.kate-swp
+.swp.*
+### SublimeText template
+# cache files for sublime text
+*.tmlanguage.cache
+*.tmPreferences.cache
+*.stTheme.cache
+
+# workspace files are user-specific
+*.sublime-workspace
+
+# project files should be checked into the repository, unless a significant
+# proportion of contributors will probably not be using SublimeText
+# *.sublime-project
+
+# sftp configuration file
+sftp-config.json
+### Linux template
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff:
+.idea
+.idea/tasks.xml
+.idea/dictionaries
+.idea/vcs.xml
+.idea/jsLibraryMappings.xml
+
+# Sensitive or high-churn files:
+.idea/dataSources.ids
+.idea/dataSources.xml
+.idea/dataSources.local.xml
+.idea/sqlDataSources.xml
+.idea/dynamic.xml
+.idea/uiDesigner.xml
+
+# Gradle:
+.idea/gradle.xml
+.idea/libraries
+
+# Mongo Explorer plugin:
+.idea/mongoSettings.xml
+
+## File-based project format:
+*.iws
+
+## Plugin-specific files:
+
+# IntelliJ
+/out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+### Xcode template
+# Xcode
+#
+# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
+
+## Build generated
+build/
+DerivedData/
+
+## Various settings
+*.pbxuser
+!default.pbxuser
+*.mode1v3
+!default.mode1v3
+*.mode2v3
+!default.mode2v3
+*.perspectivev3
+!default.perspectivev3
+xcuserdata/
+
+## Other
+*.moved-aside
+*.xccheckout
+*.xcscmblueprint
+### Eclipse template
+
+.metadata
+bin/
+tmp/
+*.tmp
+*.bak
+*.swp
+*~.nib
+local.properties
+.settings/
+.loadpath
+.recommenders
+
+# Eclipse Core
+.project
+
+# External tool builders
+.externalToolBuilders/
+
+# Locally stored "Eclipse launch configurations"
+*.launch
+
+# PyDev specific (Python IDE for Eclipse)
+*.pydevproject
+
+# CDT-specific (C/C++ Development Tooling)
+.cproject
+
+# JDT-specific (Eclipse Java Development Tools)
+.classpath
+
+# Java annotation processor (APT)
+.factorypath
+
+# PDT-specific (PHP Development Tools)
+.buildpath
+
+# sbteclipse plugin
+.target
+
+# Tern plugin
+.tern-project
+
+# TeXlipse plugin
+.texlipse
+
+# STS (Spring Tool Suite)
+.springBeans
+
+# Code Recommenders
+.recommenders/
+
+
+coverage.txt
+
+#vendor
+vendor/
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
new file mode 100644
index 000000000000..702fa5b72513
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
@@ -0,0 +1,21 @@
+sudo: false
+language: go
+go:
+ - 1.8.x
+env:
+ - DEP_VERSION="0.3.2"
+
+before_install:
+ # Download the binary to bin folder in $GOPATH
+ - curl -L -s https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 -o $GOPATH/bin/dep
+ # Make the binary executable
+ - chmod +x $GOPATH/bin/dep
+
+install:
+ - dep ensure
+
+script:
+ - make test
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
new file mode 100644
index 000000000000..0e64822d2cc9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
@@ -0,0 +1,30 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+Types of changes:
+- `Added` for new features.
+- `Changed` for changes in existing functionality.
+- `Deprecated` for soon-to-be removed features.
+- `Removed` for now removed features.
+- `Fixed` for any bug fixes.
+- `Security` in case of vulnerabilities.
+
+## [Unreleased]
+### Added
+- This CHANGELOG file to keep track of changes.
+
+## 1.0.0 - 2018-05-08
+### Added
+- grpc_auth
+- grpc_ctxtags
+- grpc_zap
+- grpc_logrus
+- grpc_opentracing
+- grpc_retry
+- grpc_validator
+- grpc_recovery
+
+[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...HEAD
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md
new file mode 100644
index 000000000000..dd52ab8938e0
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CONTRIBUTING.md
@@ -0,0 +1,20 @@
+# Contributing
+
+We would love to have people submit pull requests and help make `grpc-ecosystem/go-grpc-middleware` even better 👍.
+
+Fork, then clone the repo:
+
+```bash
+git clone git@github.com:your-username/go-grpc-middleware.git
+```
+
+Before checking in please run the following:
+
+```bash
+make all
+```
+
+This will `vet`, `fmt`, regenerate documentation and run all tests.
+
+
+Push to your fork and open a pull request.
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.lock b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.lock
new file mode 100644
index 000000000000..ebdcb75a878d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.lock
@@ -0,0 +1,123 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "cloud.google.com/go"
+ packages = ["compute/metadata"]
+ revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
+ version = "v0.16.0"
+
+[[projects]]
+ name = "github.com/davecgh/go-spew"
+ packages = ["spew"]
+ revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+ version = "v1.1.0"
+
+[[projects]]
+ name = "github.com/gogo/protobuf"
+ packages = ["gogoproto","proto","protoc-gen-gogo/descriptor"]
+ revision = "342cbe0a04158f6dcb03ca0079991a51a4248c02"
+ version = "v0.5"
+
+[[projects]]
+ branch = "master"
+ name = "github.com/golang/protobuf"
+ packages = ["jsonpb","proto","ptypes","ptypes/any","ptypes/duration","ptypes/struct","ptypes/timestamp"]
+ revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
+
+[[projects]]
+ name = "github.com/opentracing/opentracing-go"
+ packages = [".","ext","log","mocktracer"]
+ revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
+ version = "v1.0.2"
+
+[[projects]]
+ name = "github.com/pmezard/go-difflib"
+ packages = ["difflib"]
+ revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+ version = "v1.0.0"
+
+[[projects]]
+ name = "github.com/sirupsen/logrus"
+ packages = ["."]
+ revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
+ version = "v1.0.3"
+
+[[projects]]
+ name = "github.com/stretchr/testify"
+ packages = ["assert","require","suite"]
+ revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
+ version = "v1.1.4"
+
+[[projects]]
+ name = "go.uber.org/atomic"
+ packages = ["."]
+ revision = "8474b86a5a6f79c443ce4b2992817ff32cf208b8"
+ version = "v1.3.1"
+
+[[projects]]
+ name = "go.uber.org/multierr"
+ packages = ["."]
+ revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a"
+ version = "v1.1.0"
+
+[[projects]]
+ name = "go.uber.org/zap"
+ packages = [".","buffer","internal/bufferpool","internal/color","internal/exit","zapcore"]
+ revision = "35aad584952c3e7020db7b839f6b102de6271f89"
+ version = "v1.7.1"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/crypto"
+ packages = ["ssh/terminal"]
+ revision = "94eea52f7b742c7cbe0b03b22f0c4c8631ece122"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/net"
+ packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
+ revision = "a8b9294777976932365dabb6640cf1468d95c70f"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/oauth2"
+ packages = [".","google","internal","jws","jwt"]
+ revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/sys"
+ packages = ["unix","windows"]
+ revision = "13fcbd661c8ececa8807a29b48407d674b1d8ed8"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/text"
+ packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
+ revision = "75cc3cad82b5f47d3fb229ddda8c5167da14f294"
+
+[[projects]]
+ name = "google.golang.org/appengine"
+ packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
+ revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
+ version = "v1.0.0"
+
+[[projects]]
+ branch = "master"
+ name = "google.golang.org/genproto"
+ packages = ["googleapis/rpc/status"]
+ revision = "7f0da29060c682909f650ad8ed4e515bd74fa12a"
+
+[[projects]]
+ name = "google.golang.org/grpc"
+ packages = [".","balancer","balancer/roundrobin","codes","connectivity","credentials","credentials/oauth","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
+ revision = "5a9f7b402fe85096d2e1d0383435ee1876e863d0"
+ version = "v1.8.0"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "b24c6670412eb0bc44ed1db77fecc52333f8725f3e3272bdc568f5683a63031f"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.toml b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.toml
new file mode 100644
index 000000000000..0a7d4c1cd84c
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/Gopkg.toml
@@ -0,0 +1,35 @@
+[[constraint]]
+ name = "github.com/gogo/protobuf"
+ version = "0.5.0"
+
+[[constraint]]
+ branch = "master"
+ name = "github.com/golang/protobuf"
+
+[[constraint]]
+ name = "github.com/opentracing/opentracing-go"
+ version = "1.0.2"
+
+[[constraint]]
+ name = "github.com/sirupsen/logrus"
+ version = "1.0.3"
+
+[[constraint]]
+ name = "github.com/stretchr/testify"
+ version = "1.1.4"
+
+[[constraint]]
+ name = "go.uber.org/zap"
+ version = "1.7.1"
+
+[[constraint]]
+ branch = "master"
+ name = "golang.org/x/net"
+
+[[constraint]]
+ branch = "master"
+ name = "golang.org/x/oauth2"
+
+[[constraint]]
+ name = "google.golang.org/grpc"
+ version = "1.8.0"
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE
new file mode 100644
index 000000000000..b2b065037fc4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
new file mode 100644
index 000000000000..224069b22374
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
@@ -0,0 +1,84 @@
+# Go gRPC Middleware
+
+[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware.svg?branch=master)](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware)
+[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-middleware)](https://goreportcard.com/report/github.com/grpc-ecosystem/go-grpc-middleware)
+[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware)
+[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/?badge)
+[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware)
+[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
+[![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status)
+[![Slack](slack.png)](https://join.slack.com/t/improbable-eng/shared_invite/enQtMzQ1ODcyMzQ5MjM4LWY5ZWZmNGM2ODc5MmViNmQ3ZTA3ZTY3NzQwOTBlMTkzZmIxZTIxODk0OWU3YjZhNWVlNDU3MDlkZGViZjhkMjc)
+
+[gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities.
+
+## Middleware
+
+[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for
+interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs)
+that is executed either on the gRPC server, before the request is passed on to the user's application logic, or on the gRPC client, around the outgoing call. It is a perfect way to implement
+common patterns: auth, logging, message validation, retries, or monitoring.
+
+These are generic building blocks that make it easy to build multiple microservices.
+The purpose of this repository is to act as a go-to point for such reusable functionality. It contains
+some of this functionality itself and also links to useful external repos.
+
+`grpc_middleware` itself provides support for chaining interceptors, here's an example:
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-middleware"
+
+myServer := grpc.NewServer(
+ grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
+ grpc_ctxtags.StreamServerInterceptor(),
+ grpc_opentracing.StreamServerInterceptor(),
+ grpc_prometheus.StreamServerInterceptor,
+ grpc_zap.StreamServerInterceptor(zapLogger),
+ grpc_auth.StreamServerInterceptor(myAuthFunction),
+ grpc_recovery.StreamServerInterceptor(),
+ )),
+ grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
+ grpc_ctxtags.UnaryServerInterceptor(),
+ grpc_opentracing.UnaryServerInterceptor(),
+ grpc_prometheus.UnaryServerInterceptor,
+ grpc_zap.UnaryServerInterceptor(zapLogger),
+ grpc_auth.UnaryServerInterceptor(myAuthFunction),
+ grpc_recovery.UnaryServerInterceptor(),
+ )),
+)
+```
+
+## Interceptors
+
+*Please send a PR to add new interceptors or middleware to this list*
+
+#### Auth
+ * [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware
+
+#### Logging
+ * [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
+ * [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
+ * [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
+
+
+#### Monitoring
+ * [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
+ * [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors
+ * [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags
+
+#### Client
+ * [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware
+
+#### Server
+ * [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
+ * [`grpc_recovery`](recovery/) - turn panics into gRPC errors
+
+
+## Status
+
+This code has been running in *production* since May 2016 as the basis of the gRPC microservices stack at [Improbable](https://improbable.io).
+
+Additional tooling will be added, and contributions are welcome.
+
+## License
+
+`go-grpc-middleware` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
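
The README's server example has a natural client-side counterpart. A hedged sketch (the target address and retry count are placeholders, and `grpc_retry.WithMax` is assumed from the retry subpackage listed under Client) chaining a retrying unary client interceptor:

```go
import (
	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
	"google.golang.org/grpc"
)

// Sketch only: dial with a chained unary client interceptor that retries
// failed calls up to three times before surfacing the error.
conn, err := grpc.Dial(
	"example.local:8080",
	grpc.WithInsecure(),
	grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(
		grpc_retry.UnaryClientInterceptor(grpc_retry.WithMax(3)),
	)),
)
```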
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
new file mode 100644
index 000000000000..45a2f5f49a71
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
@@ -0,0 +1,183 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Server Interceptor chaining middleware.
+
+package grpc_middleware
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+)
+
+// ChainUnaryServer creates a single interceptor out of a chain of many interceptors.
+//
+// Execution is done in left-to-right order, including passing of context.
+// For example ChainUnaryServer(one, two, three) will execute one before two before three, and three
+// will see context changes of one and two.
+func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
+ n := len(interceptors)
+
+ if n > 1 {
+ lastI := n - 1
+ return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ var (
+ chainHandler grpc.UnaryHandler
+ curI int
+ )
+
+ chainHandler = func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
+ if curI == lastI {
+ return handler(currentCtx, currentReq)
+ }
+ curI++
+ resp, err := interceptors[curI](currentCtx, currentReq, info, chainHandler)
+ curI--
+ return resp, err
+ }
+
+ return interceptors[0](ctx, req, info, chainHandler)
+ }
+ }
+
+ if n == 1 {
+ return interceptors[0]
+ }
+
+ // n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
+ return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ return handler(ctx, req)
+ }
+}
+
+// ChainStreamServer creates a single interceptor out of a chain of many interceptors.
+//
+// Execution is done in left-to-right order, including passing of context.
+// For example ChainStreamServer(one, two, three) will execute one before two before three.
+// If you want to pass context between interceptors, use WrapServerStream.
+func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
+ n := len(interceptors)
+
+ if n > 1 {
+ lastI := n - 1
+ return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ var (
+ chainHandler grpc.StreamHandler
+ curI int
+ )
+
+ chainHandler = func(currentSrv interface{}, currentStream grpc.ServerStream) error {
+ if curI == lastI {
+ return handler(currentSrv, currentStream)
+ }
+ curI++
+ err := interceptors[curI](currentSrv, currentStream, info, chainHandler)
+ curI--
+ return err
+ }
+
+ return interceptors[0](srv, stream, info, chainHandler)
+ }
+ }
+
+ if n == 1 {
+ return interceptors[0]
+ }
+
+ // n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
+ return func(srv interface{}, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ return handler(srv, stream)
+ }
+}
+
+// ChainUnaryClient creates a single interceptor out of a chain of many interceptors.
+//
+// Execution is done in left-to-right order, including passing of context.
+// For example ChainUnaryClient(one, two, three) will execute one before two before three.
+func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
+ n := len(interceptors)
+
+ if n > 1 {
+ lastI := n - 1
+ return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ var (
+ chainHandler grpc.UnaryInvoker
+ curI int
+ )
+
+ chainHandler = func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
+ if curI == lastI {
+ return invoker(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentOpts...)
+ }
+ curI++
+ err := interceptors[curI](currentCtx, currentMethod, currentReq, currentRepl, currentConn, chainHandler, currentOpts...)
+ curI--
+ return err
+ }
+
+ return interceptors[0](ctx, method, req, reply, cc, chainHandler, opts...)
+ }
+ }
+
+ if n == 1 {
+ return interceptors[0]
+ }
+
+ // n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
+ return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ return invoker(ctx, method, req, reply, cc, opts...)
+ }
+}
+
+// ChainStreamClient creates a single interceptor out of a chain of many interceptors.
+//
+// Execution is done in left-to-right order, including passing of context.
+// For example ChainStreamClient(one, two, three) will execute one before two before three.
+func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
+ n := len(interceptors)
+
+ if n > 1 {
+ lastI := n - 1
+ return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ var (
+ chainHandler grpc.Streamer
+ curI int
+ )
+
+ chainHandler = func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
+ if curI == lastI {
+ return streamer(currentCtx, currentDesc, currentConn, currentMethod, currentOpts...)
+ }
+ curI++
+ stream, err := interceptors[curI](currentCtx, currentDesc, currentConn, currentMethod, chainHandler, currentOpts...)
+ curI--
+ return stream, err
+ }
+
+ return interceptors[0](ctx, desc, cc, method, chainHandler, opts...)
+ }
+ }
+
+ if n == 1 {
+ return interceptors[0]
+ }
+
+ // n == 0; Dummy interceptor maintained for backward compatibility to avoid returning nil.
+ return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ return streamer(ctx, desc, cc, method, opts...)
+ }
+}
+
+// WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors.
+// It is basically syntactic sugar for grpc.UnaryInterceptor(ChainUnaryServer(...)).
+func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption {
+ return grpc.UnaryInterceptor(ChainUnaryServer(interceptors...))
+}
+
+// WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors.
+// Basically syntactic sugar.
+func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption {
+ return grpc.StreamInterceptor(ChainStreamServer(interceptors...))
+}
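A minimal sketch of how ChainUnaryServer composes interceptors end to end. The `tag` helper, the string context key, and the interceptor names are invented for illustration, and the chained interceptor is invoked directly with a fake UnaryServerInfo only to demonstrate ordering; a real service would register it via grpc.NewServer:

    package main

    import (
    	"fmt"

    	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
    	"golang.org/x/net/context"
    	"google.golang.org/grpc"
    )

    // tag returns a unary server interceptor that appends its name to a
    // context value, so the handler can observe left-to-right execution.
    func tag(name string) grpc.UnaryServerInterceptor {
    	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
    		trail, _ := ctx.Value("trail").(string)
    		return handler(context.WithValue(ctx, "trail", trail+name), req)
    	}
    }

    func main() {
    	chained := grpc_middleware.ChainUnaryServer(tag("one>"), tag("two>"), tag("three>"))
    	resp, _ := chained(context.Background(), "req", &grpc.UnaryServerInfo{},
    		func(ctx context.Context, req interface{}) (interface{}, error) {
    			return ctx.Value("trail"), nil // sees context changes from all three
    		})
    	fmt.Println(resp) // one>two>three>
    }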
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go
new file mode 100644
index 000000000000..716895036423
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go
@@ -0,0 +1,69 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+/*
+`grpc_middleware` is a collection of gRPC middleware packages: interceptors, helpers and tools.
+
+Middleware
+
+gRPC is a fantastic RPC framework that sees a lot of adoption in the Go world. However, the
+upstream gRPC codebase is relatively bare bones.
+
+This package, and most of its child packages, provides commonly needed middleware for gRPC:
+client-side interceptors for retries, server-side interceptors for input validation and auth,
+functions for chaining said interceptors, metadata convenience methods and more.
+
+Chaining
+
+By default, gRPC doesn't allow more than one interceptor on either the client or the server
+side. `grpc_middleware` provides convenient chaining methods that turn multiple interceptors
+into a single interceptor. Here's an example for server chaining:
+
+ myServer := grpc.NewServer(
+ grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(loggingStream, monitoringStream, authStream)),
+ grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary)),
+ )
+
+These interceptors will be executed from left to right: logging, monitoring and auth.
+
+Here's an example for client side chaining:
+
+ clientConn, err = grpc.Dial(
+ address,
+ grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(monitoringClientUnary, retryUnary)),
+ grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(monitoringClientStream, retryStream)),
+ )
+ client = pb_testproto.NewTestServiceClient(clientConn)
+ resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
+
+These interceptors will be executed from left to right: monitoring and then retry logic.
+
+The retry interceptor will call every interceptor that follows it whenever a retry happens.
+
+Writing Your Own
+
+Implementing your own interceptor is pretty trivial: there are interfaces for that. The interesting
+bit is exposing common data to handlers (and other middleware), similar to HTTP middleware design.
+For example, you may want to pass the identity of the caller from the auth interceptor all the way
+to the handling function.
+
+For example, a server-side unary interceptor for auth looks like:
+
+ func FakeAuthUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ newCtx := context.WithValue(ctx, "user_id", "john@example.com")
+ return handler(newCtx, req)
+ }
+
+Unfortunately, it's not as easy for streaming RPCs. These have the `context.Context` embedded within
+the `grpc.ServerStream` object. To pass values through context, a wrapper (`WrappedServerStream`) is
+needed. For example:
+
+ func FakeAuthStreamingInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+    newStream := grpc_middleware.WrapServerStream(stream)
+    newStream.WrappedContext = context.WithValue(stream.Context(), "user_id", "john@example.com")
+    return handler(srv, newStream)
+ }
+*/
+package grpc_middleware
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile
new file mode 100644
index 000000000000..51dc5b8f2007
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/makefile
@@ -0,0 +1,16 @@
+SHELL=/bin/bash
+
+GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/)
+
+all: vet fmt test
+
+fmt:
+ go fmt $(GOFILES_NOVENDOR)
+
+vet:
+ go vet $(GOFILES_NOVENDOR)
+
+test: vet
+ ./scripts/test_all.sh
+
+.PHONY: all test
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png
new file mode 100644
index 000000000000..cc8f9a68a936
Binary files /dev/null and b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/slack.png differ
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
new file mode 100644
index 000000000000..597b862445fe
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go
@@ -0,0 +1,29 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_middleware
+
+import (
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+)
+
+// WrappedServerStream is a thin wrapper around grpc.ServerStream that allows modifying context.
+type WrappedServerStream struct {
+ grpc.ServerStream
+ // WrappedContext is the wrapper's own Context. You can assign it.
+ WrappedContext context.Context
+}
+
+// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context()
+func (w *WrappedServerStream) Context() context.Context {
+ return w.WrappedContext
+}
+
+// WrapServerStream returns a ServerStream that has the ability to overwrite context.
+func WrapServerStream(stream grpc.ServerStream) *WrappedServerStream {
+ if existing, ok := stream.(*WrappedServerStream); ok {
+ return existing
+ }
+ return &WrappedServerStream{ServerStream: stream, WrappedContext: stream.Context()}
+}
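A short sketch of WrapServerStream in use inside a stream server interceptor, assuming the same imports as the doc.go examples above; `userKey` and the interceptor name are hypothetical:

    // userKey is a hypothetical context key type for the example.
    type userKey struct{}

    func authStream(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
    	// WrapServerStream is idempotent: wrapping an already-wrapped stream
    	// returns the existing wrapper.
    	wrapped := grpc_middleware.WrapServerStream(ss)
    	wrapped.WrappedContext = context.WithValue(ss.Context(), userKey{}, "john@example.com")
    	return handler(srv, wrapped) // downstream sees the enriched context
    }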
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
new file mode 100644
index 000000000000..364516251b93
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
@@ -0,0 +1,27 @@
+Copyright (c) 2015, Gengo, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ * Neither the name of Gengo, Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
new file mode 100644
index 000000000000..76cafe6ec7f7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
@@ -0,0 +1,22 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+
+package(default_visibility = ["//visibility:public"])
+
+proto_library(
+ name = "internal_proto",
+ srcs = ["stream_chunk.proto"],
+ deps = ["@com_google_protobuf//:any_proto"],
+)
+
+go_proto_library(
+ name = "internal_go_proto",
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
+ proto = ":internal_proto",
+)
+
+go_library(
+ name = "go_default_library",
+ embed = [":internal_go_proto"],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go
new file mode 100644
index 000000000000..8858f069046f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go
@@ -0,0 +1,118 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: internal/stream_chunk.proto
+
+package internal
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import any "github.com/golang/protobuf/ptypes/any"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// StreamError is a response type which is returned when
+// streaming rpc returns an error.
+type StreamError struct {
+ GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"`
+ HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"`
+ Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
+ HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"`
+ Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StreamError) Reset() { *m = StreamError{} }
+func (m *StreamError) String() string { return proto.CompactTextString(m) }
+func (*StreamError) ProtoMessage() {}
+func (*StreamError) Descriptor() ([]byte, []int) {
+ return fileDescriptor_stream_chunk_a2afb657504565d7, []int{0}
+}
+func (m *StreamError) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StreamError.Unmarshal(m, b)
+}
+func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StreamError.Marshal(b, m, deterministic)
+}
+func (dst *StreamError) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StreamError.Merge(dst, src)
+}
+func (m *StreamError) XXX_Size() int {
+ return xxx_messageInfo_StreamError.Size(m)
+}
+func (m *StreamError) XXX_DiscardUnknown() {
+ xxx_messageInfo_StreamError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StreamError proto.InternalMessageInfo
+
+func (m *StreamError) GetGrpcCode() int32 {
+ if m != nil {
+ return m.GrpcCode
+ }
+ return 0
+}
+
+func (m *StreamError) GetHttpCode() int32 {
+ if m != nil {
+ return m.HttpCode
+ }
+ return 0
+}
+
+func (m *StreamError) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
+func (m *StreamError) GetHttpStatus() string {
+ if m != nil {
+ return m.HttpStatus
+ }
+ return ""
+}
+
+func (m *StreamError) GetDetails() []*any.Any {
+ if m != nil {
+ return m.Details
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
+}
+
+func init() {
+ proto.RegisterFile("internal/stream_chunk.proto", fileDescriptor_stream_chunk_a2afb657504565d7)
+}
+
+var fileDescriptor_stream_chunk_a2afb657504565d7 = []byte{
+ // 223 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x90, 0x41, 0x4e, 0xc3, 0x30,
+ 0x10, 0x45, 0x15, 0x4a, 0x69, 0x3b, 0xd9, 0x45, 0x5d, 0x18, 0xba, 0x20, 0x62, 0x95, 0x95, 0x23,
+ 0xc1, 0x09, 0x00, 0x71, 0x81, 0x74, 0xc7, 0xa6, 0x9a, 0x26, 0x83, 0x13, 0x91, 0xd8, 0xd1, 0x78,
+ 0x22, 0x94, 0x6b, 0x71, 0xc2, 0xca, 0x8e, 0xb2, 0xf4, 0x7b, 0x7f, 0xbe, 0xbe, 0x0c, 0xa7, 0xce,
+ 0x0a, 0xb1, 0xc5, 0xbe, 0xf4, 0xc2, 0x84, 0xc3, 0xa5, 0x6e, 0x27, 0xfb, 0xab, 0x47, 0x76, 0xe2,
+ 0xb2, 0xa3, 0xe1, 0xb1, 0xd6, 0x06, 0x85, 0xfe, 0x70, 0xd6, 0x3c, 0x59, 0xe9, 0x06, 0x7a, 0x7a,
+ 0x34, 0xce, 0x99, 0x9e, 0xca, 0x98, 0xb9, 0x4e, 0x3f, 0x25, 0xda, 0x79, 0x39, 0x78, 0xf9, 0x4f,
+ 0x20, 0x3d, 0xc7, 0x9e, 0x2f, 0x66, 0xc7, 0xd9, 0x09, 0x0e, 0xa1, 0xe2, 0x52, 0xbb, 0x86, 0x54,
+ 0x92, 0x27, 0xc5, 0xb6, 0xda, 0x07, 0xf0, 0xe9, 0x1a, 0x0a, 0xb2, 0x15, 0x19, 0x17, 0x79, 0xb7,
+ 0xc8, 0x00, 0xa2, 0x54, 0xb0, 0x1b, 0xc8, 0x7b, 0x34, 0xa4, 0x36, 0x79, 0x52, 0x1c, 0xaa, 0xf5,
+ 0x99, 0x3d, 0x43, 0x1a, 0xcf, 0xbc, 0xa0, 0x4c, 0x5e, 0xdd, 0x47, 0x0b, 0x01, 0x9d, 0x23, 0xc9,
+ 0x34, 0xec, 0x1a, 0x12, 0xec, 0x7a, 0xaf, 0xb6, 0xf9, 0xa6, 0x48, 0x5f, 0x8f, 0x7a, 0x59, 0xac,
+ 0xd7, 0xc5, 0xfa, 0xdd, 0xce, 0xd5, 0x1a, 0xfa, 0x80, 0xef, 0xfd, 0xfa, 0x09, 0xd7, 0x87, 0x18,
+ 0x79, 0xbb, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x7d, 0xa5, 0x18, 0x17, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto
new file mode 100644
index 000000000000..55f42ce63ec0
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto
@@ -0,0 +1,15 @@
+syntax = "proto3";
+package grpc.gateway.runtime;
+option go_package = "internal";
+
+import "google/protobuf/any.proto";
+
+// StreamError is a response type which is returned when
+// streaming rpc returns an error.
+message StreamError {
+ int32 grpc_code = 1;
+ int32 http_code = 2;
+ string message = 3;
+ string http_status = 4;
+ repeated google.protobuf.Any details = 5;
+}
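For reference, the generated Go type above maps one-to-one onto this message. A tiny sketch constructing the trailing error chunk payload by hand, assuming imports of fmt, net/http, google.golang.org/grpc/codes and the internal package; the field values are made up:

    serr := &internal.StreamError{
    	GrpcCode:   int32(codes.NotFound), // 5
    	HttpCode:   http.StatusNotFound,   // 404
    	Message:    "resource not found",
    	HttpStatus: "Not Found",
    }
    fmt.Println(serr.GetGrpcCode(), serr.GetHttpCode(), serr.GetMessage())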
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
new file mode 100644
index 000000000000..20862228ef87
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
@@ -0,0 +1,84 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "context.go",
+ "convert.go",
+ "doc.go",
+ "errors.go",
+ "fieldmask.go",
+ "handler.go",
+ "marshal_httpbodyproto.go",
+ "marshal_json.go",
+ "marshal_jsonpb.go",
+ "marshal_proto.go",
+ "marshaler.go",
+ "marshaler_registry.go",
+ "mux.go",
+ "pattern.go",
+ "proto2_convert.go",
+ "proto_errors.go",
+ "query.go",
+ ],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime",
+ deps = [
+ "//internal:go_default_library",
+ "//utilities:go_default_library",
+ "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
+ "@com_github_golang_protobuf//proto:go_default_library",
+ "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen",
+ "@go_googleapis//google/api:httpbody_go_proto",
+ "@io_bazel_rules_go//proto/wkt:any_go_proto",
+ "@io_bazel_rules_go//proto/wkt:duration_go_proto",
+ "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
+ "@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
+ "@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
+ "@org_golang_google_grpc//codes:go_default_library",
+ "@org_golang_google_grpc//grpclog:go_default_library",
+ "@org_golang_google_grpc//metadata:go_default_library",
+ "@org_golang_google_grpc//status:go_default_library",
+ ],
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = [
+ "context_test.go",
+ "errors_test.go",
+ "fieldmask_test.go",
+ "handler_test.go",
+ "marshal_httpbodyproto_test.go",
+ "marshal_json_test.go",
+ "marshal_jsonpb_test.go",
+ "marshal_proto_test.go",
+ "marshaler_registry_test.go",
+ "mux_test.go",
+ "pattern_test.go",
+ "query_test.go",
+ ],
+ embed = [":go_default_library"],
+ deps = [
+ "//examples/proto/examplepb:go_default_library",
+ "//internal:go_default_library",
+ "//utilities:go_default_library",
+ "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
+ "@com_github_golang_protobuf//proto:go_default_library",
+ "@com_github_golang_protobuf//ptypes:go_default_library_gen",
+ "@go_googleapis//google/api:httpbody_go_proto",
+ "@go_googleapis//google/rpc:errdetails_go_proto",
+ "@io_bazel_rules_go//proto/wkt:duration_go_proto",
+ "@io_bazel_rules_go//proto/wkt:empty_go_proto",
+ "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
+ "@io_bazel_rules_go//proto/wkt:struct_go_proto",
+ "@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
+ "@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
+ "@org_golang_google_grpc//:go_default_library",
+ "@org_golang_google_grpc//codes:go_default_library",
+ "@org_golang_google_grpc//metadata:go_default_library",
+ "@org_golang_google_grpc//status:go_default_library",
+ ],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
new file mode 100644
index 000000000000..896057e1e1e1
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
@@ -0,0 +1,210 @@
+package runtime
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "net"
+ "net/http"
+ "net/textproto"
+ "strconv"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// MetadataHeaderPrefix is the http prefix that represents custom metadata
+// parameters to or from a gRPC call.
+const MetadataHeaderPrefix = "Grpc-Metadata-"
+
+// MetadataPrefix is prepended to permanent HTTP header keys (as specified
+// by the IANA) when added to the gRPC context.
+const MetadataPrefix = "grpcgateway-"
+
+// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
+// HTTP headers in a response handled by grpc-gateway
+const MetadataTrailerPrefix = "Grpc-Trailer-"
+
+const metadataGrpcTimeout = "Grpc-Timeout"
+const metadataHeaderBinarySuffix = "-Bin"
+
+const xForwardedFor = "X-Forwarded-For"
+const xForwardedHost = "X-Forwarded-Host"
+
+var (
+ // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
+ // header isn't present. If the value is 0 the sent `context` will not have a timeout.
+ DefaultContextTimeout = 0 * time.Second
+)
+
+func decodeBinHeader(v string) ([]byte, error) {
+ if len(v)%4 == 0 {
+ // Input was padded, or padding was not necessary.
+ return base64.StdEncoding.DecodeString(v)
+ }
+ return base64.RawStdEncoding.DecodeString(v)
+}
+
+/*
+AnnotateContext adds context information such as metadata from the request.
+
+At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
+except that the forwarded destination is not another HTTP service but rather
+a gRPC service.
+*/
+func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
+ var pairs []string
+ timeout := DefaultContextTimeout
+ if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
+ var err error
+ timeout, err = timeoutDecode(tm)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
+ }
+ }
+
+ for key, vals := range req.Header {
+ for _, val := range vals {
+ key = textproto.CanonicalMIMEHeaderKey(key)
+ // For backwards-compatibility, pass through 'authorization' header with no prefix.
+ if key == "Authorization" {
+ pairs = append(pairs, "authorization", val)
+ }
+ if h, ok := mux.incomingHeaderMatcher(key); ok {
+ // Handles "-bin" metadata in grpc, since grpc will do another base64
+ // encode before sending to server, we need to decode it first.
+ if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
+ b, err := decodeBinHeader(val)
+ if err != nil {
+ return nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
+ }
+
+ val = string(b)
+ }
+ pairs = append(pairs, h, val)
+ }
+ }
+ }
+ if host := req.Header.Get(xForwardedHost); host != "" {
+ pairs = append(pairs, strings.ToLower(xForwardedHost), host)
+ } else if req.Host != "" {
+ pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
+ }
+
+ if addr := req.RemoteAddr; addr != "" {
+ if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
+ if fwd := req.Header.Get(xForwardedFor); fwd == "" {
+ pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
+ } else {
+ pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
+ }
+ } else {
+ grpclog.Infof("invalid remote addr: %s", addr)
+ }
+ }
+
+ if timeout != 0 {
+ ctx, _ = context.WithTimeout(ctx, timeout)
+ }
+ if len(pairs) == 0 {
+ return ctx, nil
+ }
+ md := metadata.Pairs(pairs...)
+ for _, mda := range mux.metadataAnnotators {
+ md = metadata.Join(md, mda(ctx, req))
+ }
+ return metadata.NewOutgoingContext(ctx, md), nil
+}
+
+// ServerMetadata consists of metadata sent from gRPC server.
+type ServerMetadata struct {
+ HeaderMD metadata.MD
+ TrailerMD metadata.MD
+}
+
+type serverMetadataKey struct{}
+
+// NewServerMetadataContext creates a new context with ServerMetadata
+func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
+ return context.WithValue(ctx, serverMetadataKey{}, md)
+}
+
+// ServerMetadataFromContext returns the ServerMetadata in ctx
+func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
+ md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
+ return
+}
+
+func timeoutDecode(s string) (time.Duration, error) {
+ size := len(s)
+ if size < 2 {
+ return 0, fmt.Errorf("timeout string is too short: %q", s)
+ }
+ d, ok := timeoutUnitToDuration(s[size-1])
+ if !ok {
+ return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
+ }
+ t, err := strconv.ParseInt(s[:size-1], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return d * time.Duration(t), nil
+}
+
+func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
+ switch u {
+ case 'H':
+ return time.Hour, true
+ case 'M':
+ return time.Minute, true
+ case 'S':
+ return time.Second, true
+ case 'm':
+ return time.Millisecond, true
+ case 'u':
+ return time.Microsecond, true
+ case 'n':
+ return time.Nanosecond, true
+ default:
+ }
+ return
+}
+
+// isPermanentHTTPHeader checks whether hdr belongs to the list of
+// permanent request headers maintained by IANA.
+// http://www.iana.org/assignments/message-headers/message-headers.xml
+func isPermanentHTTPHeader(hdr string) bool {
+ switch hdr {
+ case
+ "Accept",
+ "Accept-Charset",
+ "Accept-Language",
+ "Accept-Ranges",
+ "Authorization",
+ "Cache-Control",
+ "Content-Type",
+ "Cookie",
+ "Date",
+ "Expect",
+ "From",
+ "Host",
+ "If-Match",
+ "If-Modified-Since",
+ "If-None-Match",
+ "If-Schedule-Tag-Match",
+ "If-Unmodified-Since",
+ "Max-Forwards",
+ "Origin",
+ "Pragma",
+ "Referer",
+ "User-Agent",
+ "Via",
+ "Warning":
+ return true
+ }
+ return false
+}
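A sketch of what AnnotateContext does with an inbound request, assuming the default header matcher installed by runtime.NewServeMux; the URL and header values are made up:

    package main

    import (
    	"context"
    	"fmt"
    	"net/http"

    	"github.com/grpc-ecosystem/grpc-gateway/runtime"
    	"google.golang.org/grpc/metadata"
    )

    func main() {
    	mux := runtime.NewServeMux()
    	req, _ := http.NewRequest("GET", "http://example.com/v1/ping", nil)
    	req.Header.Set("Grpc-Timeout", "250m")             // decoded by timeoutDecode as 250ms
    	req.Header.Set("Grpc-Metadata-Trace-Id", "abc123") // MetadataHeaderPrefix is stripped

    	ctx, err := runtime.AnnotateContext(context.Background(), mux, req)
    	if err != nil {
    		panic(err)
    	}
    	md, _ := metadata.FromOutgoingContext(ctx)
    	deadline, ok := ctx.Deadline()
    	fmt.Println(md["trace-id"], ok, deadline) // [abc123] true (about 250ms from now)
    }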
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
new file mode 100644
index 000000000000..a5b3bd6a792c
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
@@ -0,0 +1,312 @@
+package runtime
+
+import (
+ "encoding/base64"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/golang/protobuf/jsonpb"
+ "github.com/golang/protobuf/ptypes/duration"
+ "github.com/golang/protobuf/ptypes/timestamp"
+ "github.com/golang/protobuf/ptypes/wrappers"
+)
+
+// String just returns the given string.
+// It is just for compatibility with the other converters.
+func String(val string) (string, error) {
+ return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+ return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+ return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+ s := strings.Split(val, sep)
+ values := make([]bool, len(s))
+ for i, v := range s {
+ value, err := Bool(v)
+ if err != nil {
+ return values, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+ return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+ s := strings.Split(val, sep)
+ values := make([]float64, len(s))
+ for i, v := range s {
+ value, err := Float64(v)
+ if err != nil {
+ return values, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+ s := strings.Split(val, sep)
+ values := make([]float32, len(s))
+ for i, v := range s {
+ value, err := Float32(v)
+ if err != nil {
+ return values, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+ return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+ s := strings.Split(val, sep)
+ values := make([]int64, len(s))
+ for i, v := range s {
+ value, err := Int64(v)
+ if err != nil {
+ return values, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+ i, err := strconv.ParseInt(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+ s := strings.Split(val, sep)
+ values := make([]int32, len(s))
+ for i, v := range s {
+ value, err := Int32(v)
+ if err != nil {
+ return values, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+ return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+ s := strings.Split(val, sep)
+ values := make([]uint64, len(s))
+ for i, v := range s {
+ value, err := Uint64(v)
+ if err != nil {
+ return values, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+ i, err := strconv.ParseUint(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+ s := strings.Split(val, sep)
+ values := make([]uint32, len(s))
+ for i, v := range s {
+ value, err := Uint32(v)
+ if err != nil {
+ return values, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// The byte sequence may be encoded in standard or URL-safe base64.
+func Bytes(val string) ([]byte, error) {
+ b, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ b, err = base64.URLEncoding.DecodeString(val)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return b, nil
+}
+
+// BytesSlice converts 'val' where individual byte sequences, encoded in standard or
+// URL-safe base64, are separated by 'sep' into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+ s := strings.Split(val, sep)
+ values := make([][]byte, len(s))
+ for i, v := range s {
+ value, err := Bytes(v)
+ if err != nil {
+ return values, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
+func Timestamp(val string) (*timestamp.Timestamp, error) {
+	var r timestamp.Timestamp
+	// Unmarshal into an addressable value; passing a nil pointer to
+	// jsonpb.UnmarshalString would panic.
+	err := jsonpb.UnmarshalString(val, &r)
+	return &r, err
+}
+
+// Duration converts the given string into a duration.Duration.
+func Duration(val string) (*duration.Duration, error) {
+	var r duration.Duration
+	err := jsonpb.UnmarshalString(val, &r)
+	return &r, err
+}
+
+// Enum converts the given string into an int32 that should be type casted into the
+// correct enum proto type.
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+ e, ok := enumValMap[val]
+ if ok {
+ return e, nil
+ }
+
+ i, err := Int32(val)
+ if err != nil {
+ return 0, fmt.Errorf("%s is not valid", val)
+ }
+ for _, v := range enumValMap {
+ if v == i {
+ return i, nil
+ }
+ }
+ return 0, fmt.Errorf("%s is not valid", val)
+}
+
+// EnumSlice converts 'val' where individual enums are separated by 'sep'
+// into a int32 slice. Each individual int32 should be type casted into the
+// correct enum proto type.
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
+ s := strings.Split(val, sep)
+ values := make([]int32, len(s))
+ for i, v := range s {
+ value, err := Enum(v, enumValMap)
+ if err != nil {
+ return values, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+/*
+ Support for google.protobuf.wrappers on top of primitive types
+*/
+
+// StringValue well-known type support as wrapper around string type
+func StringValue(val string) (*wrappers.StringValue, error) {
+ return &wrappers.StringValue{Value: val}, nil
+}
+
+// FloatValue well-known type support as wrapper around float32 type
+func FloatValue(val string) (*wrappers.FloatValue, error) {
+ parsedVal, err := Float32(val)
+ return &wrappers.FloatValue{Value: parsedVal}, err
+}
+
+// DoubleValue well-known type support as wrapper around float64 type
+func DoubleValue(val string) (*wrappers.DoubleValue, error) {
+ parsedVal, err := Float64(val)
+ return &wrappers.DoubleValue{Value: parsedVal}, err
+}
+
+// BoolValue well-known type support as wrapper around bool type
+func BoolValue(val string) (*wrappers.BoolValue, error) {
+ parsedVal, err := Bool(val)
+ return &wrappers.BoolValue{Value: parsedVal}, err
+}
+
+// Int32Value well-known type support as wrapper around int32 type
+func Int32Value(val string) (*wrappers.Int32Value, error) {
+ parsedVal, err := Int32(val)
+ return &wrappers.Int32Value{Value: parsedVal}, err
+}
+
+// UInt32Value well-known type support as wrapper around uint32 type
+func UInt32Value(val string) (*wrappers.UInt32Value, error) {
+ parsedVal, err := Uint32(val)
+ return &wrappers.UInt32Value{Value: parsedVal}, err
+}
+
+// Int64Value well-known type support as wrapper around int64 type
+func Int64Value(val string) (*wrappers.Int64Value, error) {
+ parsedVal, err := Int64(val)
+ return &wrappers.Int64Value{Value: parsedVal}, err
+}
+
+// UInt64Value well-known type support as wrapper around uint64 type
+func UInt64Value(val string) (*wrappers.UInt64Value, error) {
+ parsedVal, err := Uint64(val)
+ return &wrappers.UInt64Value{Value: parsedVal}, err
+}
+
+// BytesValue well-known type support as wrapper around the []byte type
+func BytesValue(val string) (*wrappers.BytesValue, error) {
+ parsedVal, err := Bytes(val)
+ return &wrappers.BytesValue{Value: parsedVal}, err
+}
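A few of these converters in action; the enum map is a stand-in for one generated from a real proto enum:

    package main

    import (
    	"fmt"

    	"github.com/grpc-ecosystem/grpc-gateway/runtime"
    )

    func main() {
    	ids, _ := runtime.Int32Slice("1,2,3", ",") // []int32{1, 2, 3}
    	fmt.Println(ids)

    	// enumValMap would normally come from generated proto code.
    	enumValMap := map[string]int32{"UNKNOWN": 0, "ACTIVE": 1}
    	v, _ := runtime.Enum("ACTIVE", enumValMap) // symbolic name -> 1
    	w, _ := runtime.Enum("1", enumValMap)      // the numeric form is accepted too
    	fmt.Println(v, w)

    	b, _ := runtime.BoolValue("true") // -> *wrappers.BoolValue
    	fmt.Println(b.GetValue())
    }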
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
new file mode 100644
index 000000000000..b6e5ddf7a9f1
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
@@ -0,0 +1,5 @@
+/*
+Package runtime contains runtime helper functions used by
+servers which protoc-gen-grpc-gateway generates.
+*/
+package runtime
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
new file mode 100644
index 000000000000..ad945788dc60
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
@@ -0,0 +1,146 @@
+package runtime
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes/any"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
+// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
+func HTTPStatusFromCode(code codes.Code) int {
+ switch code {
+ case codes.OK:
+ return http.StatusOK
+ case codes.Canceled:
+ return http.StatusRequestTimeout
+ case codes.Unknown:
+ return http.StatusInternalServerError
+ case codes.InvalidArgument:
+ return http.StatusBadRequest
+ case codes.DeadlineExceeded:
+ return http.StatusGatewayTimeout
+ case codes.NotFound:
+ return http.StatusNotFound
+ case codes.AlreadyExists:
+ return http.StatusConflict
+ case codes.PermissionDenied:
+ return http.StatusForbidden
+ case codes.Unauthenticated:
+ return http.StatusUnauthorized
+ case codes.ResourceExhausted:
+ return http.StatusTooManyRequests
+ case codes.FailedPrecondition:
+ // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
+ return http.StatusBadRequest
+ case codes.Aborted:
+ return http.StatusConflict
+ case codes.OutOfRange:
+ return http.StatusBadRequest
+ case codes.Unimplemented:
+ return http.StatusNotImplemented
+ case codes.Internal:
+ return http.StatusInternalServerError
+ case codes.Unavailable:
+ return http.StatusServiceUnavailable
+ case codes.DataLoss:
+ return http.StatusInternalServerError
+ }
+
+ grpclog.Infof("Unknown gRPC error code: %v", code)
+ return http.StatusInternalServerError
+}
+
+var (
+ // HTTPError replies to the request with the error.
+ // You can set a custom function to this variable to customize error format.
+ HTTPError = DefaultHTTPError
+	// OtherErrorHandler handles the following errors used by the gateway: StatusMethodNotAllowed, StatusNotFound and StatusBadRequest
+ OtherErrorHandler = DefaultOtherErrorHandler
+)
+
+type errorBody struct {
+ Error string `protobuf:"bytes,1,name=error" json:"error"`
+ // This is to make the error more compatible with users that expect errors to be Status objects:
+ // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
+ // It should be the exact same message as the Error field.
+ Message string `protobuf:"bytes,1,name=message" json:"message"`
+ Code int32 `protobuf:"varint,2,name=code" json:"code"`
+ Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
+}
+
+// Make this also conform to proto.Message for builtin JSONPb Marshaler
+func (e *errorBody) Reset() { *e = errorBody{} }
+func (e *errorBody) String() string { return proto.CompactTextString(e) }
+func (*errorBody) ProtoMessage() {}
+
+// DefaultHTTPError is the default implementation of HTTPError.
+// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
+// If otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body returned by this function is a JSON object,
+// which contains a member whose key is "error" and whose value is err.Error().
+func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
+ const fallback = `{"error": "failed to marshal error message"}`
+
+ s, ok := status.FromError(err)
+ if !ok {
+ s = status.New(codes.Unknown, err.Error())
+ }
+
+ w.Header().Del("Trailer")
+
+ contentType := marshaler.ContentType()
+	// Check the marshaler at run time in order to keep backwards compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshaler interface to be able to remove this check.
+ if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
+ pb := s.Proto()
+ contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
+ }
+ w.Header().Set("Content-Type", contentType)
+
+ body := &errorBody{
+ Error: s.Message(),
+ Message: s.Message(),
+ Code: int32(s.Code()),
+ Details: s.Proto().GetDetails(),
+ }
+
+ buf, merr := marshaler.Marshal(body)
+ if merr != nil {
+ grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
+ w.WriteHeader(http.StatusInternalServerError)
+ if _, err := io.WriteString(w, fallback); err != nil {
+ grpclog.Infof("Failed to write response: %v", err)
+ }
+ return
+ }
+
+ md, ok := ServerMetadataFromContext(ctx)
+ if !ok {
+ grpclog.Infof("Failed to extract ServerMetadata from context")
+ }
+
+ handleForwardResponseServerMetadata(w, mux, md)
+ handleForwardResponseTrailerHeader(w, md)
+ st := HTTPStatusFromCode(s.Code())
+ w.WriteHeader(st)
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Infof("Failed to write response: %v", err)
+ }
+
+ handleForwardResponseTrailer(w, md)
+}
+
+// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
+// It simply writes a string representation of the given error into "w".
+func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
+ http.Error(w, msg, code)
+}
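Because HTTPError is a package-level variable, callers can replace it to change the error payload or headers. A sketch that wraps DefaultHTTPError; the X-Grpc-Code header is hypothetical, not part of the gateway:

    package main

    import (
    	"context"
    	"net/http"

    	"github.com/grpc-ecosystem/grpc-gateway/runtime"
    	"google.golang.org/grpc/status"
    )

    func main() {
    	runtime.HTTPError = func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
    		// Surface the gRPC code in a hypothetical diagnostic header,
    		// then delegate to the stock error renderer.
    		if s, ok := status.FromError(err); ok {
    			w.Header().Set("X-Grpc-Code", s.Code().String())
    		}
    		runtime.DefaultHTTPError(ctx, mux, m, w, r, err)
    	}
    	// ... register handlers and serve as usual.
    }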
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
new file mode 100644
index 000000000000..e1cf7a91461f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
@@ -0,0 +1,70 @@
+package runtime
+
+import (
+ "encoding/json"
+ "io"
+ "strings"
+
+ "github.com/golang/protobuf/protoc-gen-go/generator"
+ "google.golang.org/genproto/protobuf/field_mask"
+)
+
+// FieldMaskFromRequestBody creates a FieldMask containing all complete leaf paths from the JSON body.
+func FieldMaskFromRequestBody(r io.Reader) (*field_mask.FieldMask, error) {
+ fm := &field_mask.FieldMask{}
+ var root interface{}
+ if err := json.NewDecoder(r).Decode(&root); err != nil {
+ if err == io.EOF {
+ return fm, nil
+ }
+ return nil, err
+ }
+
+ queue := []fieldMaskPathItem{{node: root}}
+ for len(queue) > 0 {
+ // dequeue an item
+ item := queue[0]
+ queue = queue[1:]
+
+ if m, ok := item.node.(map[string]interface{}); ok {
+ // if the item is an object, then enqueue all of its children
+ for k, v := range m {
+ queue = append(queue, fieldMaskPathItem{path: append(item.path, generator.CamelCase(k)), node: v})
+ }
+ } else if len(item.path) > 0 {
+ // otherwise, it's a leaf node so print its path
+ fm.Paths = append(fm.Paths, strings.Join(item.path, "."))
+ }
+ }
+
+ return fm, nil
+}
+
+// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
+type fieldMaskPathItem struct {
+ // the list of prior fields leading up to node
+ path []string
+
+	// node is the current item to inspect for further path extraction: a generically decoded JSON object
+ node interface{}
+}
+
+// CamelCaseFieldMask updates the given FieldMask by converting all of its paths to CamelCase, using the same heuristic
+// that's used for naming protobuf fields in Go.
+func CamelCaseFieldMask(mask *field_mask.FieldMask) {
+ if mask == nil || mask.Paths == nil {
+ return
+ }
+
+ var newPaths []string
+ for _, path := range mask.Paths {
+ lowerCasedParts := strings.Split(path, ".")
+ var camelCasedParts []string
+ for _, part := range lowerCasedParts {
+ camelCasedParts = append(camelCasedParts, generator.CamelCase(part))
+ }
+ newPaths = append(newPaths, strings.Join(camelCasedParts, "."))
+ }
+
+ mask.Paths = newPaths
+}
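A sketch of FieldMaskFromRequestBody on a hand-written JSON body; the field names are made up:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/grpc-ecosystem/grpc-gateway/runtime"
    )

    func main() {
    	body := strings.NewReader(`{"name": "jane", "address": {"zip_code": "10001"}}`)
    	fm, err := runtime.FieldMaskFromRequestBody(body)
    	if err != nil {
    		panic(err)
    	}
    	// Leaf paths are CamelCased, e.g. "Name" and "Address.ZipCode";
    	// the slice order is unspecified because map iteration order is random.
    	fmt.Println(fm.Paths)
    }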
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
new file mode 100644
index 000000000000..2af900650dcd
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
@@ -0,0 +1,209 @@
+package runtime
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/textproto"
+
+ "context"
+ "github.com/golang/protobuf/proto"
+ "github.com/grpc-ecosystem/grpc-gateway/internal"
+ "google.golang.org/grpc/grpclog"
+)
+
+var errEmptyResponse = errors.New("empty response")
+
+// ForwardResponseStream forwards the stream from gRPC server to REST client.
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+ f, ok := w.(http.Flusher)
+ if !ok {
+ grpclog.Infof("Flush not supported in %T", w)
+ http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
+ return
+ }
+
+ md, ok := ServerMetadataFromContext(ctx)
+ if !ok {
+ grpclog.Infof("Failed to extract ServerMetadata from context")
+ http.Error(w, "unexpected error", http.StatusInternalServerError)
+ return
+ }
+ handleForwardResponseServerMetadata(w, mux, md)
+
+ w.Header().Set("Transfer-Encoding", "chunked")
+ w.Header().Set("Content-Type", marshaler.ContentType())
+ if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+
+ var delimiter []byte
+ if d, ok := marshaler.(Delimited); ok {
+ delimiter = d.Delimiter()
+ } else {
+ delimiter = []byte("\n")
+ }
+
+ var wroteHeader bool
+ for {
+ resp, err := recv()
+ if err == io.EOF {
+ return
+ }
+ if err != nil {
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+ return
+ }
+ if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+ return
+ }
+
+ buf, err := marshaler.Marshal(streamChunk(ctx, resp, mux.streamErrorHandler))
+ if err != nil {
+ grpclog.Infof("Failed to marshal response chunk: %v", err)
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+ return
+ }
+ if _, err = w.Write(buf); err != nil {
+ grpclog.Infof("Failed to send response chunk: %v", err)
+ return
+ }
+ wroteHeader = true
+ if _, err = w.Write(delimiter); err != nil {
+ grpclog.Infof("Failed to send delimiter chunk: %v", err)
+ return
+ }
+ f.Flush()
+ }
+}
+
+func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+ for k, vs := range md.HeaderMD {
+ if h, ok := mux.outgoingHeaderMatcher(k); ok {
+ for _, v := range vs {
+ w.Header().Add(h, v)
+ }
+ }
+ }
+}
+
+func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
+ for k := range md.TrailerMD {
+ tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
+ w.Header().Add("Trailer", tKey)
+ }
+}
+
+func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
+ for k, vs := range md.TrailerMD {
+ tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
+ for _, v := range vs {
+ w.Header().Add(tKey, v)
+ }
+ }
+}
+
+// responseBody is implemented by response types that expose the field to be
+// marshaled as the response body; the method is generated for a response struct
+// when `response_body` is set in the `google.api.HttpRule`.
+type responseBody interface {
+ XXX_ResponseBody() interface{}
+}
+
+// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+ md, ok := ServerMetadataFromContext(ctx)
+ if !ok {
+ grpclog.Infof("Failed to extract ServerMetadata from context")
+ }
+
+ handleForwardResponseServerMetadata(w, mux, md)
+ handleForwardResponseTrailerHeader(w, md)
+
+ contentType := marshaler.ContentType()
+	// Check the marshaler at run time in order to keep backwards compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshaler interface to be able to remove this check.
+ if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
+ contentType = httpBodyMarshaler.ContentTypeFromMessage(resp)
+ }
+ w.Header().Set("Content-Type", contentType)
+
+ if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+ var buf []byte
+ var err error
+ if rb, ok := resp.(responseBody); ok {
+ buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
+ } else {
+ buf, err = marshaler.Marshal(resp)
+ }
+ if err != nil {
+ grpclog.Infof("Marshal error: %v", err)
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+
+ if _, err = w.Write(buf); err != nil {
+ grpclog.Infof("Failed to write response: %v", err)
+ }
+
+ handleForwardResponseTrailer(w, md)
+}
+
+func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
+ if len(opts) == 0 {
+ return nil
+ }
+ for _, opt := range opts {
+ if err := opt(ctx, w, resp); err != nil {
+ grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
+ return err
+ }
+ }
+ return nil
+}
+
+func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
+ serr := streamError(ctx, mux.streamErrorHandler, err)
+ if !wroteHeader {
+ w.WriteHeader(int(serr.HttpCode))
+ }
+ buf, merr := marshaler.Marshal(errorChunk(serr))
+ if merr != nil {
+ grpclog.Infof("Failed to marshal an error: %v", merr)
+ return
+ }
+ if _, werr := w.Write(buf); werr != nil {
+ grpclog.Infof("Failed to notify error to client: %v", werr)
+ return
+ }
+}
+
+// streamChunk returns a chunk in a response stream for the given result. The
+// given errHandler is used to render an error chunk if result is nil.
+func streamChunk(ctx context.Context, result proto.Message, errHandler StreamErrorHandlerFunc) map[string]proto.Message {
+ if result == nil {
+ return errorChunk(streamError(ctx, errHandler, errEmptyResponse))
+ }
+ return map[string]proto.Message{"result": result}
+}
+
+// streamError returns the payload for the final message in a response stream
+// that represents the given err.
+func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError {
+ serr := errHandler(ctx, err)
+ if serr != nil {
+ return serr
+ }
+ // TODO: log about misbehaving stream error handler?
+ return DefaultHTTPStreamErrorHandler(ctx, err)
+}
+
+func errorChunk(err *StreamError) map[string]proto.Message {
+ return map[string]proto.Message{"error": (*internal.StreamError)(err)}
+}
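For a sense of the wire format ForwardResponseStream produces: each received message is wrapped under a "result" key (or "error" for the trailing failure chunk) and delimited by "\n" unless the marshaler supplies its own delimiter. A sketch using the package's JSONPb marshaler (defined later in this diff) on a wrappers.StringValue stand-in message:

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    	"github.com/golang/protobuf/ptypes/wrappers"
    	"github.com/grpc-ecosystem/grpc-gateway/runtime"
    )

    func main() {
    	m := &runtime.JSONPb{}
    	// Mirrors streamChunk above: one JSON object per received message.
    	chunk := map[string]proto.Message{"result": &wrappers.StringValue{Value: "hello"}}
    	buf, _ := m.Marshal(chunk)
    	fmt.Println(string(buf)) // {"result":"hello"}; the "\n" delimiter follows on the wire
    }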
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
new file mode 100644
index 000000000000..f55285b5d6ca
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
@@ -0,0 +1,43 @@
+package runtime
+
+import (
+ "google.golang.org/genproto/googleapis/api/httpbody"
+)
+
+// SetHTTPBodyMarshaler overwrites the default marshaler with the HTTPBodyMarshaler.
+func SetHTTPBodyMarshaler(serveMux *ServeMux) {
+ serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
+ Marshaler: &JSONPb{OrigName: true},
+ }
+}
+
+// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
+// google.api.HttpBody message as the full response body if it is
+// the actual message used as the response. If not, then this will
+// simply fallback to the Marshaler specified as its default Marshaler.
+type HTTPBodyMarshaler struct {
+ Marshaler
+}
+
+// ContentType implementation to keep backwards compatibility with the Marshaler interface.
+func (h *HTTPBodyMarshaler) ContentType() string {
+ return h.ContentTypeFromMessage(nil)
+}
+
+// ContentTypeFromMessage returns the specified content type if v is a
+// google.api.HttpBody message; otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
+ if httpBody, ok := v.(*httpbody.HttpBody); ok {
+ return httpBody.GetContentType()
+ }
+ return h.Marshaler.ContentType()
+}
+
+// Marshal marshals "v" by returning the body bytes if v is a
+// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
+ if httpBody, ok := v.(*httpbody.HttpBody); ok {
+ return httpBody.Data, nil
+ }
+ return h.Marshaler.Marshal(v)
+}
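A sketch of the pass-through behavior, with made-up body bytes; anything that is not an HttpBody falls back to the wrapped marshaler:

    package main

    import (
    	"fmt"

    	"github.com/grpc-ecosystem/grpc-gateway/runtime"
    	"google.golang.org/genproto/googleapis/api/httpbody"
    )

    func main() {
    	m := &runtime.HTTPBodyMarshaler{Marshaler: &runtime.JSONPb{}}

    	// An HttpBody message is passed through verbatim with its own content type...
    	body := &httpbody.HttpBody{ContentType: "image/png", Data: []byte("\x89PNG")}
    	buf, _ := m.Marshal(body)
    	fmt.Println(m.ContentTypeFromMessage(body), len(buf)) // image/png 4

    	// ...anything else uses the wrapped Marshaler's content type.
    	fmt.Println(m.ContentTypeFromMessage(struct{}{})) // application/json
    }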
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
new file mode 100644
index 000000000000..f9d3a585a4c0
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
@@ -0,0 +1,45 @@
+package runtime
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
+// with the standard "encoding/json" package of Go.
+// Although it is generally faster for simple proto messages than JSONPb,
+// it does not support advanced features of protobuf, e.g. map, oneof, ....
+//
+// The NewEncoder and NewDecoder types return *json.Encoder and
+// *json.Decoder respectively.
+type JSONBuiltin struct{}
+
+// ContentType always returns "application/json".
+func (*JSONBuiltin) ContentType() string {
+ return "application/json"
+}
+
+// Marshal marshals "v" into JSON
+func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
+ return json.Marshal(v)
+}
+
+// Unmarshal unmarshals JSON data into "v".
+func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
+ return json.Unmarshal(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
+ return json.NewDecoder(r)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
+ return json.NewEncoder(w)
+}
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONBuiltin) Delimiter() []byte {
+ return []byte("\n")
+}
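A quick sketch of JSONBuiltin's encoder and delimiter behavior on plain Go values; the values are arbitrary:

    package main

    import (
    	"bytes"
    	"fmt"

    	"github.com/grpc-ecosystem/grpc-gateway/runtime"
    )

    func main() {
    	var j runtime.JSONBuiltin
    	var buf bytes.Buffer

    	enc := j.NewEncoder(&buf) // a *json.Encoder; Encode appends "\n" itself
    	_ = enc.Encode(map[string]int{"a": 1})
    	_ = enc.Encode(map[string]int{"b": 2})
    	fmt.Print(buf.String()) // {"a":1}\n{"b":2}\n

    	var out map[string]int
    	_ = j.Unmarshal([]byte(`{"c":3}`), &out)
    	fmt.Println(out["c"]) // 3
    }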
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
new file mode 100644
index 000000000000..f0de351b212d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
@@ -0,0 +1,262 @@
+package runtime
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+
+ "github.com/golang/protobuf/jsonpb"
+ "github.com/golang/protobuf/proto"
+)
+
+// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
+// with the "github.com/golang/protobuf/jsonpb".
+// It supports fully functionality of protobuf unlike JSONBuiltin.
+//
+// The NewDecoder method returns a DecoderWrapper, so the underlying
+// *json.Decoder methods can be used.
+type JSONPb jsonpb.Marshaler
+
+// ContentType always returns "application/json".
+func (*JSONPb) ContentType() string {
+ return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
+ if _, ok := v.(proto.Message); !ok {
+ return j.marshalNonProtoField(v)
+ }
+
+ var buf bytes.Buffer
+ if err := j.marshalTo(&buf, v); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
+ p, ok := v.(proto.Message)
+ if !ok {
+ buf, err := j.marshalNonProtoField(v)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(buf)
+ return err
+ }
+ return (*jsonpb.Marshaler)(j).Marshal(w, p)
+}
+
+var (
+ // protoMessageType is stored to prevent constant lookup of the same type at runtime.
+ protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+)
+
+// marshalNonProtoField marshals a non-message field of a protobuf message.
+// This function does not correctly marshal arbitrary data structures into JSON;
+// it is only capable of marshaling non-message field values of protobuf,
+// i.e. primitive types, enums; pointers to primitives or enums; maps from
+// integer/string types to primitives/enums/pointers to messages.
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
+ if v == nil {
+ return []byte("null"), nil
+ }
+ rv := reflect.ValueOf(v)
+ for rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ return []byte("null"), nil
+ }
+ rv = rv.Elem()
+ }
+
+ if rv.Kind() == reflect.Slice {
+ if rv.IsNil() {
+ if j.EmitDefaults {
+ return []byte("[]"), nil
+ }
+ return []byte("null"), nil
+ }
+
+ if rv.Type().Elem().Implements(protoMessageType) {
+ var buf bytes.Buffer
+ err := buf.WriteByte('[')
+ if err != nil {
+ return nil, err
+ }
+ for i := 0; i < rv.Len(); i++ {
+ if i != 0 {
+ err = buf.WriteByte(',')
+ if err != nil {
+ return nil, err
+ }
+ }
+ if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
+ return nil, err
+ }
+ }
+ err = buf.WriteByte(']')
+ if err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+ }
+ }
+
+ if rv.Kind() == reflect.Map {
+ m := make(map[string]*json.RawMessage)
+ for _, k := range rv.MapKeys() {
+ buf, err := j.Marshal(rv.MapIndex(k).Interface())
+ if err != nil {
+ return nil, err
+ }
+ m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
+ }
+ if j.Indent != "" {
+ return json.MarshalIndent(m, "", j.Indent)
+ }
+ return json.Marshal(m)
+ }
+ if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
+ return json.Marshal(enum.String())
+ }
+ return json.Marshal(rv.Interface())
+}
+
+// Unmarshal unmarshals JSON "data" into "v"
+func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
+ return unmarshalJSONPb(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
+ d := json.NewDecoder(r)
+ return DecoderWrapper{Decoder: d}
+}
+
+// DecoderWrapper is a wrapper around a *json.Decoder that adds
+// support for protos to the Decode method.
+type DecoderWrapper struct {
+ *json.Decoder
+}
+
+// Decode wraps the embedded decoder's Decode method to support
+// protos using a jsonpb.Unmarshaler.
+func (d DecoderWrapper) Decode(v interface{}) error {
+ return decodeJSONPb(d.Decoder, v)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
+ return EncoderFunc(func(v interface{}) error {
+ if err := j.marshalTo(w, v); err != nil {
+ return err
+ }
+ // mimic json.Encoder by adding a newline (makes output
+ // easier to read when it contains multiple encoded items)
+ _, err := w.Write(j.Delimiter())
+ return err
+ })
+}
+
+func unmarshalJSONPb(data []byte, v interface{}) error {
+ d := json.NewDecoder(bytes.NewReader(data))
+ return decodeJSONPb(d, v)
+}
+
+func decodeJSONPb(d *json.Decoder, v interface{}) error {
+ p, ok := v.(proto.Message)
+ if !ok {
+ return decodeNonProtoField(d, v)
+ }
+ unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
+ return unmarshaler.UnmarshalNext(d, p)
+}
+
+func decodeNonProtoField(d *json.Decoder, v interface{}) error {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr {
+ return fmt.Errorf("%T is not a pointer", v)
+ }
+ for rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ if rv.Type().ConvertibleTo(typeProtoMessage) {
+ unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
+ return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
+ }
+ rv = rv.Elem()
+ }
+ if rv.Kind() == reflect.Map {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(rv.Type()))
+ }
+ conv, ok := convFromType[rv.Type().Key().Kind()]
+ if !ok {
+ return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
+ }
+
+ m := make(map[string]*json.RawMessage)
+ if err := d.Decode(&m); err != nil {
+ return err
+ }
+ for k, v := range m {
+ result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
+ if err := result[1].Interface(); err != nil {
+ return err.(error)
+ }
+ bk := result[0]
+ bv := reflect.New(rv.Type().Elem())
+ if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
+ return err
+ }
+ rv.SetMapIndex(bk, bv.Elem())
+ }
+ return nil
+ }
+ if _, ok := rv.Interface().(protoEnum); ok {
+ var repr interface{}
+ if err := d.Decode(&repr); err != nil {
+ return err
+ }
+ switch repr.(type) {
+ case string:
+ // TODO(yugui) Should use proto.StructProperties?
+ return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
+ case float64:
+ rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
+ return nil
+ default:
+ return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
+ }
+ }
+ return d.Decode(v)
+}
+
+type protoEnum interface {
+ fmt.Stringer
+ EnumDescriptor() ([]byte, []int)
+}
+
+var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+
+// Delimiter returns the delimiter for newline-encoded JSON streams.
+func (j *JSONPb) Delimiter() []byte {
+ return []byte("\n")
+}
+
+// allowUnknownFields controls whether the decoder returns an error when the
+// destination is a struct and the input contains object keys which do not
+// match any non-ignored, exported fields in the destination.
+var allowUnknownFields = true
+
+// DisallowUnknownFields configures the decoder (unmarshaller) to return an
+// error when it finds an unknown field. This function must be called before
+// using the JSON marshaller.
+func DisallowUnknownFields() {
+ allowUnknownFields = false
+}
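
JSONPb is a defined type over jsonpb.Marshaler, so jsonpb's exported fields (OrigName, EmitDefaults, EnumsAsInts, Indent) can be set directly with a struct literal. A sketch of a common configuration (illustrative usage, not vendored code):

```go
package main

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func main() {
	// Keep original proto field names, emit zero-valued fields, pretty-print.
	marshaler := &runtime.JSONPb{
		OrigName:     true,
		EmitDefaults: true,
		Indent:       "  ",
	}

	// Optional: reject unknown JSON keys while decoding. Note this flips a
	// package-level flag, so it must run before any unmarshaling happens.
	runtime.DisallowUnknownFields()

	_ = marshaler
}
```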
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
new file mode 100644
index 000000000000..f65d1a2676b8
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
@@ -0,0 +1,62 @@
+package runtime
+
+import (
+ "io"
+
+ "errors"
+ "github.com/golang/protobuf/proto"
+ "io/ioutil"
+)
+
+// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes.
+type ProtoMarshaller struct{}
+
+// ContentType always returns "application/octet-stream".
+func (*ProtoMarshaller) ContentType() string {
+ return "application/octet-stream"
+}
+
+// Marshal marshals "value" into Proto
+func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
+ message, ok := value.(proto.Message)
+ if !ok {
+ return nil, errors.New("unable to marshal non proto field")
+ }
+ return proto.Marshal(message)
+}
+
+// Unmarshal unmarshals proto "data" into "value"
+func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
+ message, ok := value.(proto.Message)
+ if !ok {
+ return errors.New("unable to unmarshal non proto field")
+ }
+ return proto.Unmarshal(data, message)
+}
+
+// NewDecoder returns a Decoder which reads proto stream from "reader".
+func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
+ return DecoderFunc(func(value interface{}) error {
+ buffer, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return err
+ }
+ return marshaller.Unmarshal(buffer, value)
+ })
+}
+
+// NewEncoder returns an Encoder which writes proto stream into "writer".
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
+ return EncoderFunc(func(value interface{}) error {
+ buffer, err := marshaller.Marshal(value)
+ if err != nil {
+ return err
+ }
+ _, err = writer.Write(buffer)
+ if err != nil {
+ return err
+ }
+
+ return nil
+ })
+}
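
A quick round trip through ProtoMarshaller, using a well-known wrapper type so no generated code is needed (illustrative usage, not vendored code):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes/wrappers"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	m := &runtime.ProtoMarshaller{}

	// Any proto.Message works; wrappers.StringValue avoids generated code.
	in := &wrappers.StringValue{Value: "hello"}
	buf, err := m.Marshal(in)
	if err != nil {
		panic(err)
	}

	out := &wrappers.StringValue{}
	if err := m.Unmarshal(buf, out); err != nil {
		panic(err)
	}
	fmt.Println(out.Value) // hello
}
```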
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
new file mode 100644
index 000000000000..98fe6e88ac59
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
@@ -0,0 +1,48 @@
+package runtime
+
+import (
+ "io"
+)
+
+// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
+type Marshaler interface {
+ // Marshal marshals "v" into byte sequence.
+ Marshal(v interface{}) ([]byte, error)
+ // Unmarshal unmarshals "data" into "v".
+ // "v" must be a pointer value.
+ Unmarshal(data []byte, v interface{}) error
+ // NewDecoder returns a Decoder which reads byte sequence from "r".
+ NewDecoder(r io.Reader) Decoder
+	// NewEncoder returns an Encoder which writes a byte sequence into "w".
+ NewEncoder(w io.Writer) Encoder
+ // ContentType returns the Content-Type which this marshaler is responsible for.
+ ContentType() string
+}
+
+// Decoder decodes a byte sequence
+type Decoder interface {
+ Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into byte sequence.
+type Encoder interface {
+ Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into a Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
+// EncoderFunc adapts an encoder function into an Encoder.
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+	// Delimiter returns the record separator for the stream.
+ Delimiter() []byte
+}
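
The function adapters make it cheap to satisfy these interfaces without defining new types. For example, a decoder that counts how many values it has decoded (illustrative usage, not vendored code):

```go
import (
	"encoding/json"
	"io"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

// newCountingDecoder wraps a JSON decoder so callers can observe how many
// values were decoded; DecoderFunc adapts the closure into a Decoder.
func newCountingDecoder(r io.Reader, n *int) runtime.Decoder {
	d := json.NewDecoder(r)
	return runtime.DecoderFunc(func(v interface{}) error {
		*n++
		return d.Decode(v)
	})
}
```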
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
new file mode 100644
index 000000000000..5cc53ae4f681
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
@@ -0,0 +1,91 @@
+package runtime
+
+import (
+ "errors"
+ "net/http"
+)
+
+// MIMEWildcard is the fallback MIME type used for requests which do not match
+// a registered MIME type.
+const MIMEWildcard = "*"
+
+var (
+ acceptHeader = http.CanonicalHeaderKey("Accept")
+ contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
+
+ defaultMarshaler = &JSONPb{OrigName: true}
+)
+
+// MarshalerForRequest returns the inbound/outbound marshalers for this request.
+// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
+// If it isn't set (or the request Content-Type is empty), it falls back to "*".
+// If there are multiple Content-Type headers set, it chooses the first one that it can
+// exactly match in the registry.
+// The Accept header is resolved the same way to pick the outbound marshaler; if
+// nothing matches, the outbound marshaler falls back to the inbound one.
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
+ for _, acceptVal := range r.Header[acceptHeader] {
+ if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
+ outbound = m
+ break
+ }
+ }
+
+ for _, contentTypeVal := range r.Header[contentTypeHeader] {
+ if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok {
+ inbound = m
+ break
+ }
+ }
+
+ if inbound == nil {
+ inbound = mux.marshalers.mimeMap[MIMEWildcard]
+ }
+ if outbound == nil {
+ outbound = inbound
+ }
+
+ return inbound, outbound
+}
+
+// marshalerRegistry is a mapping from MIME types to Marshalers.
+type marshalerRegistry struct {
+ mimeMap map[string]Marshaler
+}
+
+// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
+// MIME type).
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
+ if len(mime) == 0 {
+ return errors.New("empty MIME type")
+ }
+
+ m.mimeMap[mime] = marshaler
+
+ return nil
+}
+
+// makeMarshalerMIMERegistry returns a new registry of marshalers.
+// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
+//
+// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
+// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with a "application/json" Content-Type.
+// "*" can be used to match any Content-Type.
+// This can be attached to a ServeMux with WithMarshalerOption.
+func makeMarshalerMIMERegistry() marshalerRegistry {
+ return marshalerRegistry{
+ mimeMap: map[string]Marshaler{
+ MIMEWildcard: defaultMarshaler,
+ },
+ }
+}
+
+// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
+// Marshalers to a MIME type in mux.
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
+ return func(mux *ServeMux) {
+ if err := mux.marshalers.add(mime, marshaler); err != nil {
+ panic(err)
+ }
+ }
+}
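
Wiring the registry up: marshalers are attached at mux construction time and matched by exact Content-Type/Accept strings. A sketch (illustrative usage, not vendored code):

```go
import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func newMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		// Replace the wildcard default with proto-original field names.
		runtime.WithMarshalerOption(runtime.MIMEWildcard,
			&runtime.JSONPb{OrigName: true, EmitDefaults: true}),
		// Clients sending this Content-Type exchange raw serialized protos.
		runtime.WithMarshalerOption("application/octet-stream",
			&runtime.ProtoMarshaller{}),
	)
}
```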
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
new file mode 100644
index 000000000000..1da3a58854db
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
@@ -0,0 +1,303 @@
+package runtime
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "net/textproto"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// A HandlerFunc handles a specific pair of path pattern and HTTP method.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
+
+// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when
+// a request is received with a URI path that does not match any registered
+// service method.
+//
+// Since gRPC servers return an "Unimplemented" code for requests with an
+// unrecognized URI path, this error also has a gRPC "Unimplemented" code.
+var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
+
+// ServeMux is a request multiplexer for grpc-gateway.
+// It matches http requests to patterns and invokes the corresponding handler.
+type ServeMux struct {
+ // handlers maps HTTP method to a list of handlers.
+ handlers map[string][]handler
+ forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
+ marshalers marshalerRegistry
+ incomingHeaderMatcher HeaderMatcherFunc
+ outgoingHeaderMatcher HeaderMatcherFunc
+ metadataAnnotators []func(context.Context, *http.Request) metadata.MD
+ streamErrorHandler StreamErrorHandlerFunc
+ protoErrorHandler ProtoErrorHandlerFunc
+ disablePathLengthFallback bool
+ lastMatchWins bool
+}
+
+// ServeMuxOption is an option that can be given to a ServeMux on construction.
+type ServeMuxOption func(*ServeMux)
+
+// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
+//
+// forwardResponseOption is an option that will be called on the relevant context.Context,
+// http.ResponseWriter, and proto.Message before every forwarded response.
+//
+// The message may be nil in the case where just a header is being sent.
+func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
+ }
+}
+
+// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
+type HeaderMatcherFunc func(string) (string, bool)
+
+// DefaultHeaderMatcher is used to pass http request headers to/from the gRPC context. It adds permanent HTTP header
+// keys (as specified by the IANA) to the gRPC context with the grpcgateway- prefix. HTTP headers that start with
+// 'Grpc-Metadata-' are mapped to gRPC metadata after the 'Grpc-Metadata-' prefix is removed.
+func DefaultHeaderMatcher(key string) (string, bool) {
+ key = textproto.CanonicalMIMEHeaderKey(key)
+ if isPermanentHTTPHeader(key) {
+ return MetadataPrefix + key, true
+ } else if strings.HasPrefix(key, MetadataHeaderPrefix) {
+ return key[len(MetadataHeaderPrefix):], true
+ }
+ return "", false
+}
+
+// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
+//
+// This matcher will be called with each header in the http.Request. If the matcher returns true, that header will be
+// passed to the gRPC context. To transform the header before passing it on, the matcher should return the modified header.
+func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ return func(mux *ServeMux) {
+ mux.incomingHeaderMatcher = fn
+ }
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in the response header metadata. If the matcher returns true, that header
+// will be passed to the http response returned from the gateway. To transform the header before passing it on, the
+// matcher should return the modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ return func(mux *ServeMux) {
+ mux.outgoingHeaderMatcher = fn
+ }
+}
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from the http.Request and modify the gRPC context. A common use case
+// is reading a token from a cookie and adding it to the gRPC context.
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+ }
+}
+
+// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to handle an error as general proto message defined by gRPC.
+// The response including body and status is not backward compatible with the default error handler.
+// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization.
+func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.protoErrorHandler = fn
+ }
+}
+
+// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
+func WithDisablePathLengthFallback() ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.disablePathLengthFallback = true
+ }
+}
+
+// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
+// error handler, which allows for customizing the error trailer for server-streaming
+// calls.
+//
+// For stream errors that occur before any response has been written, the mux's
+// ProtoErrorHandler will be invoked. However, once data has been written, the errors must
+// be handled differently: they must be included in the response body. The response body's
+// final message will include the error details returned by the stream error handler.
+func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.streamErrorHandler = fn
+ }
+}
+
+// WithLastMatchWins returns a ServeMuxOption that will enable "last
+// match wins" behavior, where if multiple path patterns match a
+// request path, the last one defined in the .proto file will be used.
+func WithLastMatchWins() ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.lastMatchWins = true
+ }
+}
+
+// NewServeMux returns a new ServeMux whose internal mapping is empty.
+func NewServeMux(opts ...ServeMuxOption) *ServeMux {
+ serveMux := &ServeMux{
+ handlers: make(map[string][]handler),
+ forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
+ marshalers: makeMarshalerMIMERegistry(),
+ streamErrorHandler: DefaultHTTPStreamErrorHandler,
+ }
+
+ for _, opt := range opts {
+ opt(serveMux)
+ }
+
+ if serveMux.protoErrorHandler != nil {
+ HTTPError = serveMux.protoErrorHandler
+ // OtherErrorHandler is no longer used when protoErrorHandler is set.
+ // Overwritten by a special error handler to return Unknown.
+ OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) {
+ ctx := context.Background()
+ _, outboundMarshaler := MarshalerForRequest(serveMux, r)
+ sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler")
+ serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr)
+ }
+ }
+
+ if serveMux.incomingHeaderMatcher == nil {
+ serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
+ }
+
+ if serveMux.outgoingHeaderMatcher == nil {
+ serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
+ return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
+ }
+ }
+
+ return serveMux
+}
+
+// Handle associates "h" to the pair of HTTP method and path pattern.
+func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
+ if s.lastMatchWins {
+ s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...)
+ } else {
+ s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
+ }
+}
+
+// ServeHTTP dispatches the request to the first handler whose pattern matches r.Method and r.URL.Path.
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ path := r.URL.Path
+ if !strings.HasPrefix(path, "/") {
+ if s.protoErrorHandler != nil {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest))
+ s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+ } else {
+ OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
+ }
+ return
+ }
+
+ components := strings.Split(path[1:], "/")
+ l := len(components)
+ var verb string
+ if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
+ if s.protoErrorHandler != nil {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
+ } else {
+ OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+ }
+ return
+ } else if idx > 0 {
+ c := components[l-1]
+ components[l-1], verb = c[:idx], c[idx+1:]
+ }
+
+ if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
+ r.Method = strings.ToUpper(override)
+ if err := r.ParseForm(); err != nil {
+ if s.protoErrorHandler != nil {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ sterr := status.Error(codes.InvalidArgument, err.Error())
+ s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+ } else {
+ OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
+ }
+ return
+ }
+ }
+ for _, h := range s.handlers[r.Method] {
+ pathParams, err := h.pat.Match(components, verb)
+ if err != nil {
+ continue
+ }
+ h.h(w, r, pathParams)
+ return
+ }
+
+	// look up other methods to handle a fallback from GET to POST and
+	// to determine if it is MethodNotAllowed or NotFound.
+ for m, handlers := range s.handlers {
+ if m == r.Method {
+ continue
+ }
+ for _, h := range handlers {
+ pathParams, err := h.pat.Match(components, verb)
+ if err != nil {
+ continue
+ }
+ // X-HTTP-Method-Override is optional. Always allow fallback to POST.
+ if s.isPathLengthFallback(r) {
+ if err := r.ParseForm(); err != nil {
+ if s.protoErrorHandler != nil {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ sterr := status.Error(codes.InvalidArgument, err.Error())
+ s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+ } else {
+ OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
+ }
+ return
+ }
+ h.h(w, r, pathParams)
+ return
+ }
+ if s.protoErrorHandler != nil {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
+ } else {
+ OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
+ }
+ return
+ }
+ }
+
+ if s.protoErrorHandler != nil {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
+ } else {
+ OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+ }
+}
+
+// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
+ return s.forwardResponseOptions
+}
+
+func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
+ return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
+}
+
+type handler struct {
+ pat Pattern
+ h HandlerFunc
+}
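
The incoming header matcher is the usual customization point on the mux. A sketch that forwards one extra header on top of the default rules (illustrative usage, not vendored code):

```go
import "github.com/grpc-ecosystem/grpc-gateway/runtime"

// customMatcher forwards X-Request-Id to gRPC metadata verbatim and defers
// every other header to the default IANA/Grpc-Metadata- rules.
func customMatcher(key string) (string, bool) {
	if key == "X-Request-Id" {
		return key, true
	}
	return runtime.DefaultHeaderMatcher(key)
}

func newMux() *runtime.ServeMux {
	return runtime.NewServeMux(runtime.WithIncomingHeaderMatcher(customMatcher))
}
```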
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
new file mode 100644
index 000000000000..09053695da7e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
@@ -0,0 +1,262 @@
+package runtime
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "google.golang.org/grpc/grpclog"
+)
+
+var (
+	// ErrNotMatch indicates that the given HTTP request path does not match the pattern.
+ ErrNotMatch = errors.New("not match to the path pattern")
+ // ErrInvalidPattern indicates that the given definition of Pattern is not valid.
+ ErrInvalidPattern = errors.New("invalid pattern")
+)
+
+type op struct {
+ code utilities.OpCode
+ operand int
+}
+
+// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
+type Pattern struct {
+ // ops is a list of operations
+ ops []op
+ // pool is a constant pool indexed by the operands or vars.
+ pool []string
+ // vars is a list of variables names to be bound by this pattern
+ vars []string
+ // stacksize is the max depth of the stack
+ stacksize int
+ // tailLen is the length of the fixed-size segments after a deep wildcard
+ tailLen int
+	// verb is the VERB part of the path pattern. It is empty if the pattern does not have a VERB part.
+ verb string
+ // assumeColonVerb indicates whether a path suffix after a final
+ // colon may only be interpreted as a verb.
+ assumeColonVerb bool
+}
+
+type patternOptions struct {
+ assumeColonVerb bool
+}
+
+// PatternOpt is an option for creating Patterns.
+type PatternOpt func(*patternOptions)
+
+// NewPattern returns a new Pattern from the given definition values.
+// "ops" is a sequence of op codes. "pool" is a constant pool.
+// "verb" is the verb part of the pattern. It is empty if the pattern does not have one.
+// "version" must be 1 for now.
+// It returns an error if the given definition is invalid.
+func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) {
+ options := patternOptions{
+ assumeColonVerb: true,
+ }
+ for _, o := range opts {
+ o(&options)
+ }
+
+ if version != 1 {
+ grpclog.Infof("unsupported version: %d", version)
+ return Pattern{}, ErrInvalidPattern
+ }
+
+ l := len(ops)
+ if l%2 != 0 {
+		grpclog.Infof("odd number of op codes: %d", l)
+ return Pattern{}, ErrInvalidPattern
+ }
+
+ var (
+ typedOps []op
+ stack, maxstack int
+ tailLen int
+ pushMSeen bool
+ vars []string
+ )
+ for i := 0; i < l; i += 2 {
+ op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush:
+ if pushMSeen {
+ tailLen++
+ }
+ stack++
+ case utilities.OpPushM:
+ if pushMSeen {
+ grpclog.Infof("pushM appears twice")
+ return Pattern{}, ErrInvalidPattern
+ }
+ pushMSeen = true
+ stack++
+ case utilities.OpLitPush:
+ if op.operand < 0 || len(pool) <= op.operand {
+			grpclog.Infof("literal index out of bound: %d", op.operand)
+ return Pattern{}, ErrInvalidPattern
+ }
+ if pushMSeen {
+ tailLen++
+ }
+ stack++
+ case utilities.OpConcatN:
+ if op.operand <= 0 {
+			grpclog.Infof("non-positive concat size: %d", op.operand)
+ return Pattern{}, ErrInvalidPattern
+ }
+ stack -= op.operand
+ if stack < 0 {
+ grpclog.Print("stack underflow")
+ return Pattern{}, ErrInvalidPattern
+ }
+ stack++
+ case utilities.OpCapture:
+ if op.operand < 0 || len(pool) <= op.operand {
+ grpclog.Infof("variable name index out of bound: %d", op.operand)
+ return Pattern{}, ErrInvalidPattern
+ }
+ v := pool[op.operand]
+ op.operand = len(vars)
+ vars = append(vars, v)
+ stack--
+ if stack < 0 {
+ grpclog.Infof("stack underflow")
+ return Pattern{}, ErrInvalidPattern
+ }
+ default:
+ grpclog.Infof("invalid opcode: %d", op.code)
+ return Pattern{}, ErrInvalidPattern
+ }
+
+ if maxstack < stack {
+ maxstack = stack
+ }
+ typedOps = append(typedOps, op)
+ }
+ return Pattern{
+ ops: typedOps,
+ pool: pool,
+ vars: vars,
+ stacksize: maxstack,
+ tailLen: tailLen,
+ verb: verb,
+ assumeColonVerb: options.assumeColonVerb,
+ }, nil
+}
+
+// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
+func MustPattern(p Pattern, err error) Pattern {
+ if err != nil {
+ grpclog.Fatalf("Pattern initialization failed: %v", err)
+ }
+ return p
+}
+
+// Match examines whether the given components match the Pattern.
+// If they match, the function returns a mapping from field paths to their captured values.
+// Otherwise, the function returns an error.
+func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
+ if p.verb != verb {
+ if p.assumeColonVerb || p.verb != "" {
+ return nil, ErrNotMatch
+ }
+ if len(components) == 0 {
+ components = []string{":" + verb}
+ } else {
+ components = append([]string{}, components...)
+ components[len(components)-1] += ":" + verb
+ }
+ verb = ""
+ }
+
+ var pos int
+ stack := make([]string, 0, p.stacksize)
+ captured := make([]string, len(p.vars))
+ l := len(components)
+ for _, op := range p.ops {
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush, utilities.OpLitPush:
+ if pos >= l {
+ return nil, ErrNotMatch
+ }
+ c := components[pos]
+ if op.code == utilities.OpLitPush {
+ if lit := p.pool[op.operand]; c != lit {
+ return nil, ErrNotMatch
+ }
+ }
+ stack = append(stack, c)
+ pos++
+ case utilities.OpPushM:
+ end := len(components)
+ if end < pos+p.tailLen {
+ return nil, ErrNotMatch
+ }
+ end -= p.tailLen
+ stack = append(stack, strings.Join(components[pos:end], "/"))
+ pos = end
+ case utilities.OpConcatN:
+ n := op.operand
+ l := len(stack) - n
+ stack = append(stack[:l], strings.Join(stack[l:], "/"))
+ case utilities.OpCapture:
+ n := len(stack) - 1
+ captured[op.operand] = stack[n]
+ stack = stack[:n]
+ }
+ }
+ if pos < l {
+ return nil, ErrNotMatch
+ }
+ bindings := make(map[string]string)
+ for i, val := range captured {
+ bindings[p.vars[i]] = val
+ }
+ return bindings, nil
+}
+
+// Verb returns the verb part of the Pattern.
+func (p Pattern) Verb() string { return p.verb }
+
+func (p Pattern) String() string {
+ var stack []string
+ for _, op := range p.ops {
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush:
+ stack = append(stack, "*")
+ case utilities.OpLitPush:
+ stack = append(stack, p.pool[op.operand])
+ case utilities.OpPushM:
+ stack = append(stack, "**")
+ case utilities.OpConcatN:
+ n := op.operand
+ l := len(stack) - n
+ stack = append(stack[:l], strings.Join(stack[l:], "/"))
+ case utilities.OpCapture:
+ n := len(stack) - 1
+ stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
+ }
+ }
+ segs := strings.Join(stack, "/")
+ if p.verb != "" {
+ return fmt.Sprintf("/%s:%s", segs, p.verb)
+ }
+ return "/" + segs
+}
+
+// AssumeColonVerbOpt returns a PatternOpt that controls whether a path suffix
+// after a final colon may only be interpreted as a verb.
+func AssumeColonVerbOpt(val bool) PatternOpt {
+ return PatternOpt(func(o *patternOptions) {
+ o.assumeColonVerb = val
+ })
+}
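
To make the opcode machinery concrete, here is how a compiled /v1/users/{id} pattern would be built and matched by hand. Normally protoc-gen-grpc-gateway emits these tables; the values below are a hand-worked illustration, not vendored code:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	// Opcode/operand pairs for the path template /v1/users/{id}:
	// push the literals "v1" and "users", push one free segment,
	// concatenate it, and capture it under the variable "id".
	pat := runtime.MustPattern(runtime.NewPattern(
		1, // version
		[]int{
			int(utilities.OpLitPush), 0, // pool[0] == "v1"
			int(utilities.OpLitPush), 1, // pool[1] == "users"
			int(utilities.OpPush), 0,
			int(utilities.OpConcatN), 1,
			int(utilities.OpCapture), 2, // pool[2] == "id"
		},
		[]string{"v1", "users", "id"},
		"", // no verb
	))

	params, err := pat.Match([]string{"v1", "users", "42"}, "")
	fmt.Println(params, err) // map[id:42] <nil>
	fmt.Println(pat)         // /v1/users/{id=*}
}
```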
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
new file mode 100644
index 000000000000..a3151e2a5528
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
@@ -0,0 +1,80 @@
+package runtime
+
+import (
+ "github.com/golang/protobuf/proto"
+)
+
+// StringP returns a pointer to a string whose pointee is the same as the given string value.
+func StringP(val string) (*string, error) {
+ return proto.String(val), nil
+}
+
+// BoolP parses the given string representation of a boolean value,
+// and returns a pointer to a bool whose value is the same as the parsed value.
+func BoolP(val string) (*bool, error) {
+ b, err := Bool(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Bool(b), nil
+}
+
+// Float64P parses the given string representation of a floating point number,
+// and returns a pointer to a float64 whose value is the same as the parsed number.
+func Float64P(val string) (*float64, error) {
+ f, err := Float64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Float64(f), nil
+}
+
+// Float32P parses the given string representation of a floating point number,
+// and returns a pointer to a float32 whose value is the same as the parsed number.
+func Float32P(val string) (*float32, error) {
+ f, err := Float32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to an int64 whose value is the same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+ i, err := Int64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to an int32 whose value is the same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+ i, err := Int32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Int32(i), err
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is the same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+ i, err := Uint64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Uint64(i), err
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is the same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+ i, err := Uint32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Uint32(i), err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
new file mode 100644
index 000000000000..ca76324efb1f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
@@ -0,0 +1,106 @@
+package runtime
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/ptypes/any"
+ "github.com/grpc-ecosystem/grpc-gateway/internal"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+// StreamErrorHandlerFunc accepts an error as a gRPC error generated via the status package and translates it into
+// a proto struct used to represent the error at the end of a stream.
+type StreamErrorHandlerFunc func(context.Context, error) *StreamError
+
+// StreamError is the payload for the final message in a server stream in the event that the server returns an
+// error after a response message has already been sent.
+type StreamError internal.StreamError
+
+// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
+type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
+
+var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
+
+// DefaultHTTPProtoErrorHandler is an implementation of HTTPError.
+// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
+// If otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body returned by this function is a Status message marshaled by a Marshaler.
+//
+// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead.
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
+ // return Internal when Marshal failed
+ const fallback = `{"code": 13, "message": "failed to marshal error message"}`
+
+ s, ok := status.FromError(err)
+ if !ok {
+ s = status.New(codes.Unknown, err.Error())
+ }
+
+ w.Header().Del("Trailer")
+
+ contentType := marshaler.ContentType()
+	// Check the marshaler at runtime in order to keep backwards compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshaler interface to be able to remove this check.
+ if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
+ pb := s.Proto()
+ contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
+ }
+ w.Header().Set("Content-Type", contentType)
+
+ buf, merr := marshaler.Marshal(s.Proto())
+ if merr != nil {
+ grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
+ w.WriteHeader(http.StatusInternalServerError)
+ if _, err := io.WriteString(w, fallback); err != nil {
+ grpclog.Infof("Failed to write response: %v", err)
+ }
+ return
+ }
+
+ md, ok := ServerMetadataFromContext(ctx)
+ if !ok {
+ grpclog.Infof("Failed to extract ServerMetadata from context")
+ }
+
+ handleForwardResponseServerMetadata(w, mux, md)
+ handleForwardResponseTrailerHeader(w, md)
+ st := HTTPStatusFromCode(s.Code())
+ w.WriteHeader(st)
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Infof("Failed to write response: %v", err)
+ }
+
+ handleForwardResponseTrailer(w, md)
+}
+
+// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via
+// default logic.
+//
+// It extracts the gRPC status from err if possible. The fields of the status are
+// used to populate the returned StreamError, and the HTTP status code is derived
+// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a
+// gRPC status, an "Unknown" gRPC code and an "Internal Server Error" HTTP status are used.
+func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError {
+ grpcCode := codes.Unknown
+ grpcMessage := err.Error()
+ var grpcDetails []*any.Any
+ if s, ok := status.FromError(err); ok {
+ grpcCode = s.Code()
+ grpcMessage = s.Message()
+ grpcDetails = s.Proto().GetDetails()
+ }
+ httpCode := HTTPStatusFromCode(grpcCode)
+ return &StreamError{
+ GrpcCode: int32(grpcCode),
+ HttpCode: int32(httpCode),
+ Message: grpcMessage,
+ HttpStatus: http.StatusText(httpCode),
+ Details: grpcDetails,
+ }
+}
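
To opt into the proto-status error responses described above, the handler is passed as a mux option (illustrative usage, not vendored code):

```go
import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func newMux() *runtime.ServeMux {
	// Errors are then rendered as a marshaled Status message with the HTTP
	// status derived from the gRPC code, replacing HTTPError and
	// OtherErrorHandler as described in NewServeMux.
	return runtime.NewServeMux(
		runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler),
	)
}
```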
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
new file mode 100644
index 000000000000..5fbba5e8e8b5
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
@@ -0,0 +1,391 @@
+package runtime
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/url"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "google.golang.org/grpc/grpclog"
+)
+
+// PopulateQueryParameters populates "values" into "msg".
+// A value is ignored if its key starts with one of the elements in "filter".
+// valuesKeyRegexp matches map-style query keys of the form name[key].
+var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
+
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+	for key, values := range values {
+		// The pattern is constant, so it is compiled once at package
+		// initialization instead of on every loop iteration.
+		match := valuesKeyRegexp.FindStringSubmatch(key)
+ if len(match) == 3 {
+ key = match[1]
+ values = append([]string{match[2]}, values...)
+ }
+ fieldPath := strings.Split(key, ".")
+ if filter.HasCommonPrefix(fieldPath) {
+ continue
+ }
+ if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// PopulateFieldFromPath sets a value in a nested Protobuf structure.
+// It instantiates missing protobuf fields as it goes.
+func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
+ fieldPath := strings.Split(fieldPathString, ".")
+ return populateFieldValueFromPath(msg, fieldPath, []string{value})
+}
+
+func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
+ m := reflect.ValueOf(msg)
+ if m.Kind() != reflect.Ptr {
+ return fmt.Errorf("unexpected type %T: %v", msg, msg)
+ }
+ var props *proto.Properties
+ m = m.Elem()
+ for i, fieldName := range fieldPath {
+ isLast := i == len(fieldPath)-1
+ if !isLast && m.Kind() != reflect.Struct {
+			return fmt.Errorf("non-aggregate type in the middle of path: %s", strings.Join(fieldPath, "."))
+ }
+ var f reflect.Value
+ var err error
+ f, props, err = fieldByProtoName(m, fieldName)
+ if err != nil {
+ return err
+ } else if !f.IsValid() {
+ grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
+ return nil
+ }
+
+ switch f.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
+ if !isLast {
+ return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
+ }
+ m = f
+ case reflect.Slice:
+ if !isLast {
+ return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
+ }
+ // Handle []byte
+ if f.Type().Elem().Kind() == reflect.Uint8 {
+ m = f
+ break
+ }
+ return populateRepeatedField(f, values, props)
+ case reflect.Ptr:
+ if f.IsNil() {
+ m = reflect.New(f.Type().Elem())
+ f.Set(m.Convert(f.Type()))
+ }
+ m = f.Elem()
+ continue
+ case reflect.Struct:
+ m = f
+ continue
+ case reflect.Map:
+ if !isLast {
+ return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
+ }
+ return populateMapField(f, values, props)
+ default:
+ return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
+ }
+ }
+ switch len(values) {
+ case 0:
+ return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
+ case 1:
+ default:
+ grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
+ }
+ return populateField(m, values[0], props)
+}
+
+// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
+// "m" must be a struct value. It returns zero reflect.Value if no such field found.
+func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
+ props := proto.GetProperties(m.Type())
+
+ // look up field name in oneof map
+ if op, ok := props.OneofTypes[name]; ok {
+ v := reflect.New(op.Type.Elem())
+ field := m.Field(op.Field)
+ if !field.IsNil() {
+ return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
+ }
+ field.Set(v)
+ return v.Elem().Field(0), op.Prop, nil
+ }
+
+ for _, p := range props.Prop {
+ if p.OrigName == name {
+ return m.FieldByName(p.Name), p, nil
+ }
+ if p.JSONName == name {
+ return m.FieldByName(p.Name), p, nil
+ }
+ }
+ return reflect.Value{}, nil, nil
+}
+
+func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
+	if len(values) != 2 {
+		return fmt.Errorf("expected a key and a value for map field %s, got %d values", props.Name, len(values))
+	}
+
+ key, value := values[0], values[1]
+ keyType := f.Type().Key()
+ valueType := f.Type().Elem()
+ if f.IsNil() {
+ f.Set(reflect.MakeMap(f.Type()))
+ }
+
+ keyConv, ok := convFromType[keyType.Kind()]
+ if !ok {
+ return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
+ }
+ valueConv, ok := convFromType[valueType.Kind()]
+ if !ok {
+ return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
+ }
+
+ keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
+ if err := keyV[1].Interface(); err != nil {
+ return err.(error)
+ }
+ valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
+ if err := valueV[1].Interface(); err != nil {
+ return err.(error)
+ }
+
+ f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
+
+ return nil
+}
+
+func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
+ elemType := f.Type().Elem()
+
+ // is the destination field a slice of an enumeration type?
+ if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
+ return populateFieldEnumRepeated(f, values, enumValMap)
+ }
+
+ conv, ok := convFromType[elemType.Kind()]
+ if !ok {
+ return fmt.Errorf("unsupported field type %s", elemType)
+ }
+ f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
+ for i, v := range values {
+ result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
+ if err := result[1].Interface(); err != nil {
+ return err.(error)
+ }
+ f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
+ }
+ return nil
+}
+
+func populateField(f reflect.Value, value string, props *proto.Properties) error {
+ i := f.Addr().Interface()
+
+ // Handle protobuf well known types
+ var name string
+ switch m := i.(type) {
+ case interface{ XXX_WellKnownType() string }:
+ name = m.XXX_WellKnownType()
+ case proto.Message:
+ const wktPrefix = "google.protobuf."
+ if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) {
+ name = fullName[len(wktPrefix):]
+ }
+ }
+ switch name {
+ case "Timestamp":
+ if value == "null" {
+ f.FieldByName("Seconds").SetInt(0)
+ f.FieldByName("Nanos").SetInt(0)
+ return nil
+ }
+
+ t, err := time.Parse(time.RFC3339Nano, value)
+ if err != nil {
+ return fmt.Errorf("bad Timestamp: %v", err)
+ }
+ f.FieldByName("Seconds").SetInt(int64(t.Unix()))
+ f.FieldByName("Nanos").SetInt(int64(t.Nanosecond()))
+ return nil
+ case "Duration":
+ if value == "null" {
+ f.FieldByName("Seconds").SetInt(0)
+ f.FieldByName("Nanos").SetInt(0)
+ return nil
+ }
+ d, err := time.ParseDuration(value)
+ if err != nil {
+ return fmt.Errorf("bad Duration: %v", err)
+ }
+
+ ns := d.Nanoseconds()
+ s := ns / 1e9
+ ns %= 1e9
+ f.FieldByName("Seconds").SetInt(s)
+ f.FieldByName("Nanos").SetInt(ns)
+ return nil
+ case "DoubleValue":
+ fallthrough
+ case "FloatValue":
+ float64Val, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return fmt.Errorf("bad DoubleValue: %s", value)
+ }
+ f.FieldByName("Value").SetFloat(float64Val)
+ return nil
+ case "Int64Value":
+ fallthrough
+ case "Int32Value":
+ int64Val, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+			return fmt.Errorf("bad Int64Value/Int32Value: %s", value)
+ }
+ f.FieldByName("Value").SetInt(int64Val)
+ return nil
+ case "UInt64Value":
+ fallthrough
+ case "UInt32Value":
+ uint64Val, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+			return fmt.Errorf("bad UInt64Value/UInt32Value: %s", value)
+ }
+ f.FieldByName("Value").SetUint(uint64Val)
+ return nil
+ case "BoolValue":
+ if value == "true" {
+ f.FieldByName("Value").SetBool(true)
+ } else if value == "false" {
+ f.FieldByName("Value").SetBool(false)
+ } else {
+ return fmt.Errorf("bad BoolValue: %s", value)
+ }
+ return nil
+ case "StringValue":
+ f.FieldByName("Value").SetString(value)
+ return nil
+ case "BytesValue":
+ bytesVal, err := base64.StdEncoding.DecodeString(value)
+ if err != nil {
+ return fmt.Errorf("bad BytesValue: %s", value)
+ }
+ f.FieldByName("Value").SetBytes(bytesVal)
+ return nil
+ case "FieldMask":
+ p := f.FieldByName("Paths")
+ for _, v := range strings.Split(value, ",") {
+ if v != "" {
+ p.Set(reflect.Append(p, reflect.ValueOf(v)))
+ }
+ }
+ return nil
+ }
+
+ // Handle Time and Duration stdlib types
+ switch t := i.(type) {
+ case *time.Time:
+ pt, err := time.Parse(time.RFC3339Nano, value)
+ if err != nil {
+ return fmt.Errorf("bad Timestamp: %v", err)
+ }
+ *t = pt
+ return nil
+ case *time.Duration:
+ d, err := time.ParseDuration(value)
+ if err != nil {
+ return fmt.Errorf("bad Duration: %v", err)
+ }
+ *t = d
+ return nil
+ }
+
+ // is the destination field an enumeration type?
+ if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
+ return populateFieldEnum(f, value, enumValMap)
+ }
+
+ conv, ok := convFromType[f.Kind()]
+ if !ok {
+ return fmt.Errorf("field type %T is not supported in query parameters", i)
+ }
+ result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
+ if err := result[1].Interface(); err != nil {
+ return err.(error)
+ }
+ f.Set(result[0].Convert(f.Type()))
+ return nil
+}
+
+func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
+ // see if it's an enumeration string
+ if enumVal, ok := enumValMap[value]; ok {
+ return reflect.ValueOf(enumVal).Convert(t), nil
+ }
+
+ // check for an integer that matches an enumeration value
+ eVal, err := strconv.Atoi(value)
+ if err != nil {
+ return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
+ }
+ for _, v := range enumValMap {
+ if v == int32(eVal) {
+ return reflect.ValueOf(eVal).Convert(t), nil
+ }
+ }
+ return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
+}
+
+func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
+ cval, err := convertEnum(value, f.Type(), enumValMap)
+ if err != nil {
+ return err
+ }
+ f.Set(cval)
+ return nil
+}
+
+func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
+ elemType := f.Type().Elem()
+ f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
+ for i, v := range values {
+ result, err := convertEnum(v, elemType, enumValMap)
+ if err != nil {
+ return err
+ }
+ f.Index(i).Set(result)
+ }
+ return nil
+}
+
+var (
+ convFromType = map[reflect.Kind]reflect.Value{
+ reflect.String: reflect.ValueOf(String),
+ reflect.Bool: reflect.ValueOf(Bool),
+ reflect.Float64: reflect.ValueOf(Float64),
+ reflect.Float32: reflect.ValueOf(Float32),
+ reflect.Int64: reflect.ValueOf(Int64),
+ reflect.Int32: reflect.ValueOf(Int32),
+ reflect.Uint64: reflect.ValueOf(Uint64),
+ reflect.Uint32: reflect.ValueOf(Uint32),
+ reflect.Slice: reflect.ValueOf(Bytes),
+ }
+)
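
As an illustration of the population logic, assume a generated message pb.EchoRequest with a string field "name" and an int32 field "count" (the pb package is hypothetical; any generated proto message works the same way):

```go
import (
	"net/url"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

// populate fills req from ?name=alice&count=3. pb.EchoRequest is a
// hypothetical generated message used only for illustration.
func populate(req *pb.EchoRequest) error {
	values := url.Values{"name": {"alice"}, "count": {"3"}}
	// An empty DoubleArray filters nothing; handlers normally pass the
	// field paths already bound by the URL pattern.
	return runtime.PopulateQueryParameters(req, values, utilities.NewDoubleArray(nil))
}
```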
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
new file mode 100644
index 000000000000..7109d7932318
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+ name = "go_default_library",
+ srcs = [
+ "doc.go",
+ "pattern.go",
+ "readerfactory.go",
+ "trie.go",
+ ],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities",
+)
+
+go_test(
+ name = "go_default_test",
+ size = "small",
+ srcs = ["trie_test.go"],
+ embed = [":go_default_library"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go
new file mode 100644
index 000000000000..cf79a4d58860
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go
@@ -0,0 +1,2 @@
+// Package utilities provides members for internal use in grpc-gateway.
+package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
new file mode 100644
index 000000000000..dfe7de4864ab
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// An OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+ // OpNop does nothing
+ OpNop = OpCode(iota)
+ // OpPush pushes a component to stack
+ OpPush
+ // OpLitPush pushes a component to stack if it matches to the literal
+ OpLitPush
+ // OpPushM concatenates the remaining components and pushes it to stack
+ OpPushM
+ // OpConcatN pops N items from stack, concatenates them and pushes it back to stack
+ OpConcatN
+ // OpCapture pops an item and binds it to the variable
+ OpCapture
+ // OpEnd is the least positive invalid opcode.
+ OpEnd
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
new file mode 100644
index 000000000000..6dd3854665f1
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
@@ -0,0 +1,20 @@
+package utilities
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+)
+
+// IOReaderFactory takes in an io.Reader and returns a function that allows you to create a new reader that begins
+// at the start of the stream.
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return func() io.Reader {
+ return bytes.NewReader(b)
+ }, nil
+}
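
IOReaderFactory exists so a body can be consumed more than once, for example once per retry attempt. A sketch (illustrative usage, not vendored code):

```go
import (
	"io/ioutil"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func readTwice(r *http.Request) ([]byte, []byte, error) {
	newReader, err := utilities.IOReaderFactory(r.Body)
	if err != nil {
		return nil, nil, err
	}
	// Each newReader() call yields a fresh reader over the same buffered
	// bytes, so the body can be read repeatedly.
	first, err := ioutil.ReadAll(newReader())
	if err != nil {
		return nil, nil, err
	}
	second, err := ioutil.ReadAll(newReader())
	return first, second, err
}
```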
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
new file mode 100644
index 000000000000..c2b7b30dd917
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
@@ -0,0 +1,177 @@
+package utilities
+
+import (
+ "sort"
+)
+
+// DoubleArray is a double-array trie implementation over sequences of strings.
+type DoubleArray struct {
+ // Encoding keeps an encoding from string to int
+ Encoding map[string]int
+ // Base is the base array of Double Array
+ Base []int
+ // Check is the check array of Double Array
+ Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
+func NewDoubleArray(seqs [][]string) *DoubleArray {
+ da := &DoubleArray{Encoding: make(map[string]int)}
+ if len(seqs) == 0 {
+ return da
+ }
+
+ encoded := registerTokens(da, seqs)
+ sort.Sort(byLex(encoded))
+
+ root := node{row: -1, col: -1, left: 0, right: len(encoded)}
+ addSeqs(da, encoded, 0, root)
+
+ for i := len(da.Base); i > 0; i-- {
+ if da.Check[i-1] != 0 {
+ da.Base = da.Base[:i]
+ da.Check = da.Check[:i]
+ break
+ }
+ }
+ return da
+}
+
+func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
+ var result [][]int
+ for _, seq := range seqs {
+ var encoded []int
+ for _, token := range seq {
+ if _, ok := da.Encoding[token]; !ok {
+ da.Encoding[token] = len(da.Encoding)
+ }
+ encoded = append(encoded, da.Encoding[token])
+ }
+ result = append(result, encoded)
+ }
+ for i := range result {
+ result[i] = append(result[i], len(da.Encoding))
+ }
+ return result
+}
+
+type node struct {
+ row, col int
+ left, right int
+}
+
+func (n node) value(seqs [][]int) int {
+ return seqs[n.row][n.col]
+}
+
+func (n node) children(seqs [][]int) []*node {
+ var result []*node
+ lastVal := int(-1)
+ last := new(node)
+ for i := n.left; i < n.right; i++ {
+ if lastVal == seqs[i][n.col+1] {
+ continue
+ }
+ last.right = i
+ last = &node{
+ row: i,
+ col: n.col + 1,
+ left: i,
+ }
+ result = append(result, last)
+ }
+ last.right = n.right
+ return result
+}
+
+func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
+ ensureSize(da, pos)
+
+ children := n.children(seqs)
+ var i int
+ for i = 1; ; i++ {
+ ok := func() bool {
+ for _, child := range children {
+ code := child.value(seqs)
+ j := i + code
+ ensureSize(da, j)
+ if da.Check[j] != 0 {
+ return false
+ }
+ }
+ return true
+ }()
+ if ok {
+ break
+ }
+ }
+ da.Base[pos] = i
+ for _, child := range children {
+ code := child.value(seqs)
+ j := i + code
+ da.Check[j] = pos + 1
+ }
+ terminator := len(da.Encoding)
+ for _, child := range children {
+ code := child.value(seqs)
+ if code == terminator {
+ continue
+ }
+ j := i + code
+ addSeqs(da, seqs, j, *child)
+ }
+}
+
+func ensureSize(da *DoubleArray, i int) {
+ for i >= len(da.Base) {
+ da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
+ da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
+ }
+}
+
+type byLex [][]int
+
+func (l byLex) Len() int { return len(l) }
+func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l byLex) Less(i, j int) bool {
+ si := l[i]
+ sj := l[j]
+ var k int
+ for k = 0; k < len(si) && k < len(sj); k++ {
+ if si[k] < sj[k] {
+ return true
+ }
+ if si[k] > sj[k] {
+ return false
+ }
+ }
+ if k < len(sj) {
+ return true
+ }
+ return false
+}
+
+// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
+ if len(da.Base) == 0 {
+ return false
+ }
+
+ var i int
+ for _, t := range seq {
+ code, ok := da.Encoding[t]
+ if !ok {
+ break
+ }
+ j := da.Base[i] + code
+ if len(da.Check) <= j || da.Check[j] != i+1 {
+ break
+ }
+ i = j
+ }
+ j := da.Base[i] + len(da.Encoding)
+ if len(da.Check) <= j || da.Check[j] != i+1 {
+ return false
+ }
+ return true
+}
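
A small worked example of the trie as used for query-parameter filtering: the registered sequences are field paths, and HasCommonPrefix reports whether any registered path is a prefix of the queried path (illustrative values, not vendored code):

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	// Field paths already bound from the URL pattern: "name" and "page.size".
	da := utilities.NewDoubleArray([][]string{
		{"name"},
		{"page", "size"},
	})

	fmt.Println(da.HasCommonPrefix([]string{"name"}))                  // true
	fmt.Println(da.HasCommonPrefix([]string{"page", "size", "limit"})) // true: "page.size" is a prefix
	fmt.Println(da.HasCommonPrefix([]string{"page"}))                  // false: no registered path is a prefix of "page"
}
```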
diff --git a/vendor/github.com/lxc/lxd/AUTHORS b/vendor/github.com/lxc/lxd/AUTHORS
deleted file mode 100644
index f7c0c6a2e33e..000000000000
--- a/vendor/github.com/lxc/lxd/AUTHORS
+++ /dev/null
@@ -1,5 +0,0 @@
-Unless mentioned otherwise in a specific file's header, all code in this
-project is released under the Apache 2.0 license.
-
-The list of authors and contributors can be retrieved from the git
-commit history and in some cases, the file headers.
diff --git a/vendor/github.com/lxc/lxd/shared/api/certificate.go b/vendor/github.com/lxc/lxd/shared/api/certificate.go
deleted file mode 100644
index 56664fa6ccff..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/certificate.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package api
-
-// CertificatesPost represents the fields of a new LXD certificate
-type CertificatesPost struct {
- CertificatePut `yaml:",inline"`
-
- Certificate string `json:"certificate" yaml:"certificate"`
- Password string `json:"password" yaml:"password"`
-}
-
-// CertificatePut represents the modifiable fields of a LXD certificate
-//
-// API extension: certificate_update
-type CertificatePut struct {
- Name string `json:"name" yaml:"name"`
- Type string `json:"type" yaml:"type"`
-}
-
-// Certificate represents a LXD certificate
-type Certificate struct {
- CertificatePut `yaml:",inline"`
-
- Certificate string `json:"certificate" yaml:"certificate"`
- Fingerprint string `json:"fingerprint" yaml:"fingerprint"`
-}
-
-// Writable converts a full Certificate struct into a CertificatePut struct (filters read-only fields)
-func (cert *Certificate) Writable() CertificatePut {
- return cert.CertificatePut
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/cluster.go b/vendor/github.com/lxc/lxd/shared/api/cluster.go
deleted file mode 100644
index c773929c8da3..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/cluster.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package api
-
-// Cluster represents high-level information about a LXD cluster.
-//
-// API extension: clustering
-type Cluster struct {
- ServerName string `json:"server_name" yaml:"server_name"`
- Enabled bool `json:"enabled" yaml:"enabled"`
-
- // API extension: clustering_join
- MemberConfig []ClusterMemberConfigKey `json:"member_config" yaml:"member_config"`
-}
-
-// ClusterMemberConfigKey represents a single config key that a new member of
-// the cluster is required to provide when joining.
-//
-// The Value field is empty when getting clustering information with GET
-// /1.0/cluster, and should be filled by the joining node when performing a PUT
-// /1.0/cluster join request.
-//
-// API extension: clustering_join
-type ClusterMemberConfigKey struct {
- Entity string `json:"entity" yaml:"entity"`
- Name string `json:"name" yaml:"name"`
- Key string `json:"key" yaml:"key"`
- Value string `json:"value" yaml:"value"`
- Description string `json:"description" yaml:"description"`
-}
-
-// ClusterPut represents the fields required to bootstrap or join a LXD
-// cluster.
-//
-// API extension: clustering
-type ClusterPut struct {
- Cluster `yaml:",inline"`
- ClusterAddress string `json:"cluster_address" yaml:"cluster_address"`
- ClusterCertificate string `json:"cluster_certificate" yaml:"cluster_certificate"`
-
- // API extension: clustering_join
- ServerAddress string `json:"server_address" yaml:"server_address"`
- ClusterPassword string `json:"cluster_password" yaml:"cluster_password"`
-}
-
-// ClusterMemberPost represents the fields required to rename a LXD node.
-//
-// API extension: clustering
-type ClusterMemberPost struct {
- ServerName string `json:"server_name" yaml:"server_name"`
-}
-
-// ClusterMember represents a LXD node in the cluster.
-//
-// API extension: clustering
-type ClusterMember struct {
- ServerName string `json:"server_name" yaml:"server_name"`
- URL string `json:"url" yaml:"url"`
- Database bool `json:"database" yaml:"database"`
- Status string `json:"status" yaml:"status"`
- Message string `json:"message" yaml:"message"`
-
- // API extension: clustering_roles
- Roles []string `json:"roles" yaml:"roles"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/container.go b/vendor/github.com/lxc/lxd/shared/api/container.go
deleted file mode 100644
index ed41a6e61e2d..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/container.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package api
-
-import (
- "time"
-)
-
-// ContainersPost represents the fields available for a new LXD container
-type ContainersPost struct {
- ContainerPut `yaml:",inline"`
-
- Name string `json:"name" yaml:"name"`
- Source ContainerSource `json:"source" yaml:"source"`
-
- InstanceType string `json:"instance_type" yaml:"instance_type"`
-}
-
-// ContainerPost represents the fields required to rename/move a LXD container
-type ContainerPost struct {
- // Used for renames
- Name string `json:"name" yaml:"name"`
-
- // Used for migration
- Migration bool `json:"migration" yaml:"migration"`
-
- // API extension: container_stateless_copy
- Live bool `json:"live" yaml:"live"`
-
- // API extension: container_only_migration
- ContainerOnly bool `json:"container_only" yaml:"container_only"`
-
- // API extension: container_push_target
- Target *ContainerPostTarget `json:"target" yaml:"target"`
-}
-
-// ContainerPostTarget represents the migration target host and operation
-//
-// API extension: container_push_target
-type ContainerPostTarget struct {
- Certificate string `json:"certificate" yaml:"certificate"`
- Operation string `json:"operation,omitempty" yaml:"operation,omitempty"`
- Websockets map[string]string `json:"secrets,omitempty" yaml:"secrets,omitempty"`
-}
-
-// ContainerPut represents the modifiable fields of a LXD container
-type ContainerPut struct {
- Architecture string `json:"architecture" yaml:"architecture"`
- Config map[string]string `json:"config" yaml:"config"`
- Devices map[string]map[string]string `json:"devices" yaml:"devices"`
- Ephemeral bool `json:"ephemeral" yaml:"ephemeral"`
- Profiles []string `json:"profiles" yaml:"profiles"`
-
- // For snapshot restore
- Restore string `json:"restore,omitempty" yaml:"restore,omitempty"`
- Stateful bool `json:"stateful" yaml:"stateful"`
-
- // API extension: entity_description
- Description string `json:"description" yaml:"description"`
-}
-
-// Container represents a LXD container
-type Container struct {
- ContainerPut `yaml:",inline"`
-
- CreatedAt time.Time `json:"created_at" yaml:"created_at"`
- ExpandedConfig map[string]string `json:"expanded_config" yaml:"expanded_config"`
- ExpandedDevices map[string]map[string]string `json:"expanded_devices" yaml:"expanded_devices"`
- Name string `json:"name" yaml:"name"`
- Status string `json:"status" yaml:"status"`
- StatusCode StatusCode `json:"status_code" yaml:"status_code"`
-
- // API extension: container_last_used_at
- LastUsedAt time.Time `json:"last_used_at" yaml:"last_used_at"`
-
- // API extension: clustering
- Location string `json:"location" yaml:"location"`
-}
-
-// ContainerFull is a combination of Container, ContainerState and ContainerSnapshot
-//
-// API extension: container_full
-type ContainerFull struct {
- Container `yaml:",inline"`
-
- Backups []ContainerBackup `json:"backups" yaml:"backups"`
- State *ContainerState `json:"state" yaml:"state"`
- Snapshots []ContainerSnapshot `json:"snapshots" yaml:"snapshots"`
-}
-
-// Writable converts a full Container struct into a ContainerPut struct (filters read-only fields)
-func (c *Container) Writable() ContainerPut {
- return c.ContainerPut
-}
-
-// IsActive checks whether the container state indicates the container is active
-func (c Container) IsActive() bool {
- switch c.StatusCode {
- case Stopped:
- return false
- case Error:
- return false
- default:
- return true
- }
-}
-
-// ContainerSource represents the creation source for a new container
-type ContainerSource struct {
- Type string `json:"type" yaml:"type"`
- Certificate string `json:"certificate" yaml:"certificate"`
-
- // For "image" type
- Alias string `json:"alias,omitempty" yaml:"alias,omitempty"`
- Fingerprint string `json:"fingerprint,omitempty" yaml:"fingerprint,omitempty"`
- Properties map[string]string `json:"properties,omitempty" yaml:"properties,omitempty"`
- Server string `json:"server,omitempty" yaml:"server,omitempty"`
- Secret string `json:"secret,omitempty" yaml:"secret,omitempty"`
- Protocol string `json:"protocol,omitempty" yaml:"protocol,omitempty"`
-
- // For "migration" and "copy" types
- BaseImage string `json:"base-image,omitempty" yaml:"base-image,omitempty"`
-
- // For "migration" type
- Mode string `json:"mode,omitempty" yaml:"mode,omitempty"`
- Operation string `json:"operation,omitempty" yaml:"operation,omitempty"`
- Websockets map[string]string `json:"secrets,omitempty" yaml:"secrets,omitempty"`
-
- // For "copy" type
- Source string `json:"source,omitempty" yaml:"source,omitempty"`
-
- // API extension: container_push
- Live bool `json:"live,omitempty" yaml:"live,omitempty"`
-
- // API extension: container_only_migration
- ContainerOnly bool `json:"container_only,omitempty" yaml:"container_only,omitempty"`
-
- // API extension: container_incremental_copy
- Refresh bool `json:"refresh,omitempty" yaml:"refresh,omitempty"`
-
- // API extension: container_copy_project
- Project string `json:"project,omitempty" yaml:"project,omitempty"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/container_backup.go b/vendor/github.com/lxc/lxd/shared/api/container_backup.go
deleted file mode 100644
index 8fe35e9964da..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/container_backup.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package api
-
-import "time"
-
-// ContainerBackupsPost represents the fields available for a new LXD container backup
-// API extension: container_backup
-type ContainerBackupsPost struct {
- Name string `json:"name" yaml:"name"`
- ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"`
- ContainerOnly bool `json:"container_only" yaml:"container_only"`
- OptimizedStorage bool `json:"optimized_storage" yaml:"optimized_storage"`
-}
-
-// ContainerBackup represents a LXD container backup
-// API extension: container_backup
-type ContainerBackup struct {
- Name string `json:"name" yaml:"name"`
- CreatedAt time.Time `json:"created_at" yaml:"created_at"`
- ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"`
- ContainerOnly bool `json:"container_only" yaml:"container_only"`
- OptimizedStorage bool `json:"optimized_storage" yaml:"optimized_storage"`
-}
-
-// ContainerBackupPost represents the fields available for the renaming of a
-// container backup
-// API extension: container_backup
-type ContainerBackupPost struct {
- Name string `json:"name" yaml:"name"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/container_console.go b/vendor/github.com/lxc/lxd/shared/api/container_console.go
deleted file mode 100644
index 56aff07aa495..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/container_console.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package api
-
-// ContainerConsoleControl represents a message on the container console "control" socket
-//
-// API extension: console
-type ContainerConsoleControl struct {
- Command string `json:"command" yaml:"command"`
- Args map[string]string `json:"args" yaml:"args"`
-}
-
-// ContainerConsolePost represents a LXD container console request
-//
-// API extension: console
-type ContainerConsolePost struct {
- Width int `json:"width" yaml:"width"`
- Height int `json:"height" yaml:"height"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/container_exec.go b/vendor/github.com/lxc/lxd/shared/api/container_exec.go
deleted file mode 100644
index 7e724dc49fe1..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/container_exec.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package api
-
-// ContainerExecControl represents a message on the container exec "control" socket
-type ContainerExecControl struct {
- Command string `json:"command" yaml:"command"`
- Args map[string]string `json:"args" yaml:"args"`
- Signal int `json:"signal" yaml:"signal"`
-}
-
-// ContainerExecPost represents a LXD container exec request
-type ContainerExecPost struct {
- Command []string `json:"command" yaml:"command"`
- WaitForWS bool `json:"wait-for-websocket" yaml:"wait-for-websocket"`
- Interactive bool `json:"interactive" yaml:"interactive"`
- Environment map[string]string `json:"environment" yaml:"environment"`
- Width int `json:"width" yaml:"width"`
- Height int `json:"height" yaml:"height"`
-
- // API extension: container_exec_recording
- RecordOutput bool `json:"record-output" yaml:"record-output"`
-
- // API extension: container_user_group_cwd
- User uint32 `json:"user" yaml:"user"`
- Group uint32 `json:"group" yaml:"group"`
- Cwd string `json:"cwd" yaml:"cwd"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/container_snapshot.go b/vendor/github.com/lxc/lxd/shared/api/container_snapshot.go
deleted file mode 100644
index e68a0fb8cc09..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/container_snapshot.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package api
-
-import (
- "time"
-)
-
-// ContainerSnapshotsPost represents the fields available for a new LXD container snapshot
-type ContainerSnapshotsPost struct {
- Name string `json:"name" yaml:"name"`
- Stateful bool `json:"stateful" yaml:"stateful"`
-
- // API extension: snapshot_expiry_creation
- ExpiresAt *time.Time `json:"expires_at" yaml:"expires_at"`
-}
-
-// ContainerSnapshotPost represents the fields required to rename/move a LXD container snapshot
-type ContainerSnapshotPost struct {
- Name string `json:"name" yaml:"name"`
- Migration bool `json:"migration" yaml:"migration"`
- Target *ContainerPostTarget `json:"target" yaml:"target"`
-
- // API extension: container_snapshot_stateful_migration
- Live bool `json:"live,omitempty" yaml:"live,omitempty"`
-}
-
-// ContainerSnapshotPut represents the modifiable fields of a LXD container snapshot
-// API extension: snapshot_expiry
-type ContainerSnapshotPut struct {
- Architecture string `json:"architecture" yaml:"architecture"`
- Config map[string]string `json:"config" yaml:"config"`
- Devices map[string]map[string]string `json:"devices" yaml:"devices"`
- Ephemeral bool `json:"ephemeral" yaml:"ephemeral"`
- Profiles []string `json:"profiles" yaml:"profiles"`
- ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"`
-}
-
-// ContainerSnapshot represents a LXD container snapshot
-type ContainerSnapshot struct {
- ContainerSnapshotPut `yaml:",inline"`
-
- CreatedAt time.Time `json:"created_at" yaml:"created_at"`
- ExpandedConfig map[string]string `json:"expanded_config" yaml:"expanded_config"`
- ExpandedDevices map[string]map[string]string `json:"expanded_devices" yaml:"expanded_devices"`
- LastUsedAt time.Time `json:"last_used_at" yaml:"last_used_at"`
- Name string `json:"name" yaml:"name"`
- Stateful bool `json:"stateful" yaml:"stateful"`
-}
-
-// Writable converts a full ContainerSnapshot struct into a ContainerSnapshotPut struct
-// (filters read-only fields)
-func (c *ContainerSnapshot) Writable() ContainerSnapshotPut {
- return c.ContainerSnapshotPut
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/container_state.go b/vendor/github.com/lxc/lxd/shared/api/container_state.go
deleted file mode 100644
index f9e1cb9b53ff..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/container_state.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package api
-
-// ContainerStatePut represents the modifiable fields of a LXD container's state
-type ContainerStatePut struct {
- Action string `json:"action" yaml:"action"`
- Timeout int `json:"timeout" yaml:"timeout"`
- Force bool `json:"force" yaml:"force"`
- Stateful bool `json:"stateful" yaml:"stateful"`
-}
-
-// ContainerState represents a LXD container's state
-type ContainerState struct {
- Status string `json:"status" yaml:"status"`
- StatusCode StatusCode `json:"status_code" yaml:"status_code"`
- Disk map[string]ContainerStateDisk `json:"disk" yaml:"disk"`
- Memory ContainerStateMemory `json:"memory" yaml:"memory"`
- Network map[string]ContainerStateNetwork `json:"network" yaml:"network"`
- Pid int64 `json:"pid" yaml:"pid"`
- Processes int64 `json:"processes" yaml:"processes"`
-
- // API extension: container_cpu_time
- CPU ContainerStateCPU `json:"cpu" yaml:"cpu"`
-}
-
-// ContainerStateDisk represents the disk information section of a LXD container's state
-type ContainerStateDisk struct {
- Usage int64 `json:"usage" yaml:"usage"`
-}
-
-// ContainerStateCPU represents the cpu information section of a LXD container's state
-//
-// API extension: container_cpu_time
-type ContainerStateCPU struct {
- Usage int64 `json:"usage" yaml:"usage"`
-}
-
-// ContainerStateMemory represents the memory information section of a LXD container's state
-type ContainerStateMemory struct {
- Usage int64 `json:"usage" yaml:"usage"`
- UsagePeak int64 `json:"usage_peak" yaml:"usage_peak"`
- SwapUsage int64 `json:"swap_usage" yaml:"swap_usage"`
- SwapUsagePeak int64 `json:"swap_usage_peak" yaml:"swap_usage_peak"`
-}
-
-// ContainerStateNetwork represents the network information section of a LXD container's state
-type ContainerStateNetwork struct {
- Addresses []ContainerStateNetworkAddress `json:"addresses" yaml:"addresses"`
- Counters ContainerStateNetworkCounters `json:"counters" yaml:"counters"`
- Hwaddr string `json:"hwaddr" yaml:"hwaddr"`
- HostName string `json:"host_name" yaml:"host_name"`
- Mtu int `json:"mtu" yaml:"mtu"`
- State string `json:"state" yaml:"state"`
- Type string `json:"type" yaml:"type"`
-}
-
-// ContainerStateNetworkAddress represents a network address as part of the network section of a LXD container's state
-type ContainerStateNetworkAddress struct {
- Family string `json:"family" yaml:"family"`
- Address string `json:"address" yaml:"address"`
- Netmask string `json:"netmask" yaml:"netmask"`
- Scope string `json:"scope" yaml:"scope"`
-}
-
-// ContainerStateNetworkCounters represents packet counters as part of the network section of a LXD container's state
-type ContainerStateNetworkCounters struct {
- BytesReceived int64 `json:"bytes_received" yaml:"bytes_received"`
- BytesSent int64 `json:"bytes_sent" yaml:"bytes_sent"`
- PacketsReceived int64 `json:"packets_received" yaml:"packets_received"`
- PacketsSent int64 `json:"packets_sent" yaml:"packets_sent"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/doc.go b/vendor/github.com/lxc/lxd/shared/api/doc.go
deleted file mode 100644
index f7400524dddc..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/doc.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Package api contains Go structs for all LXD API objects
-//
-// Overview
-//
-// This package has Go structs for every API object, all the various
-// structs are named after the object they represent and some variations of
-// those structs exist for initial object creation, object update and
-// object retrieval.
-//
-// A few convenience functions are also tied to those structs which let
-// you convert between the various structs for a given object and also query
-// some of the more complex metadata that LXD can export.
-package api
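
To make that convention concrete, a short sketch (not part of the diff) of the
create/retrieve/update flow, using the Profile family removed later in this
diff; the config key and values are illustrative:

	package main

	import "github.com/lxc/lxd/shared/api"

	func main() {
		// Creation uses ProfilesPost, which embeds the modifiable ProfilePut.
		create := api.ProfilesPost{
			ProfilePut: api.ProfilePut{
				Config:      map[string]string{"limits.cpu": "2"}, // illustrative
				Description: "two-CPU profile",
			},
			Name: "small",
		}

		// A GET would return the full Profile (read-only Name, UsedBy included);
		// Writable() filters it back down to the fields a PUT may change.
		full := api.Profile{ProfilePut: create.ProfilePut, Name: create.Name}
		put := full.Writable()
		put.Description = "two-CPU profile (updated)"
		_ = put // ready to send back in a PUT request
	}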
diff --git a/vendor/github.com/lxc/lxd/shared/api/event.go b/vendor/github.com/lxc/lxd/shared/api/event.go
deleted file mode 100644
index 5b9e871857ef..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/event.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package api
-
-import (
- "encoding/json"
- "time"
-)
-
-// Event represents an event entry (over websocket)
-type Event struct {
- Type string `yaml:"type" json:"type"`
- Timestamp time.Time `yaml:"timestamp" json:"timestamp"`
- Metadata json.RawMessage `yaml:"metadata" json:"metadata"`
-
- // API extension: event_location
- Location string `yaml:"location,omitempty" json:"location,omitempty"`
-}
-
-// EventLogging represents a logging type event entry (admin only)
-type EventLogging struct {
- Message string `yaml:"message" json:"message"`
- Level string `yaml:"level" json:"level"`
- Context map[string]string `yaml:"context" json:"context"`
-}
-
-// EventLifecycle represents a lifecycle type event entry
-//
-// API extension: event_lifecycle
-type EventLifecycle struct {
- Action string `yaml:"action" json:"action"`
- Source string `yaml:"source" json:"source"`
- Context map[string]interface{} `yaml:"context,omitempty" json:"context,omitempty"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/image.go b/vendor/github.com/lxc/lxd/shared/api/image.go
deleted file mode 100644
index 0eb4c392e6c4..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/image.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package api
-
-import (
- "time"
-)
-
-// ImagesPost represents the fields available for a new LXD image
-type ImagesPost struct {
- ImagePut `yaml:",inline"`
-
- Filename string `json:"filename" yaml:"filename"`
- Source *ImagesPostSource `json:"source" yaml:"source"`
-
- // API extension: image_compression_algorithm
- CompressionAlgorithm string `json:"compression_algorithm" yaml:"compression_algorithm"`
-
- // API extension: image_create_aliases
- Aliases []ImageAlias `json:"aliases" yaml:"aliases"`
-}
-
-// ImagesPostSource represents the source of a new LXD image
-type ImagesPostSource struct {
- ImageSource `yaml:",inline"`
-
- Mode string `json:"mode" yaml:"mode"`
- Type string `json:"type" yaml:"type"`
-
- // For protocol "direct"
- URL string `json:"url" yaml:"url"`
-
- // For type "container"
- Name string `json:"name" yaml:"name"`
-
- // For type "image"
- Fingerprint string `json:"fingerprint" yaml:"fingerprint"`
- Secret string `json:"secret" yaml:"secret"`
-}
-
-// ImagePut represents the modifiable fields of a LXD image
-type ImagePut struct {
- AutoUpdate bool `json:"auto_update" yaml:"auto_update"`
- Properties map[string]string `json:"properties" yaml:"properties"`
- Public bool `json:"public" yaml:"public"`
-
- // API extension: images_expiry
- ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"`
-}
-
-// Image represents a LXD image
-type Image struct {
- ImagePut `yaml:",inline"`
-
- Aliases []ImageAlias `json:"aliases" yaml:"aliases"`
- Architecture string `json:"architecture" yaml:"architecture"`
- Cached bool `json:"cached" yaml:"cached"`
- Filename string `json:"filename" yaml:"filename"`
- Fingerprint string `json:"fingerprint" yaml:"fingerprint"`
- Size int64 `json:"size" yaml:"size"`
- UpdateSource *ImageSource `json:"update_source,omitempty" yaml:"update_source,omitempty"`
-
- // API extension: image_types
- Type string `json:"type" yaml:"type"`
-
- CreatedAt time.Time `json:"created_at" yaml:"created_at"`
- LastUsedAt time.Time `json:"last_used_at" yaml:"last_used_at"`
- UploadedAt time.Time `json:"uploaded_at" yaml:"uploaded_at"`
-}
-
-// Writable converts a full Image struct into an ImagePut struct (filters read-only fields)
-func (img *Image) Writable() ImagePut {
- return img.ImagePut
-}
-
-// ImageAlias represents an alias from the alias list of a LXD image
-type ImageAlias struct {
- Name string `json:"name" yaml:"name"`
- Description string `json:"description" yaml:"description"`
-}
-
-// ImageSource represents the source of a LXD image
-type ImageSource struct {
- Alias string `json:"alias" yaml:"alias"`
- Certificate string `json:"certificate" yaml:"certificate"`
- Protocol string `json:"protocol" yaml:"protocol"`
- Server string `json:"server" yaml:"server"`
-
- // API extension: image_types
- ImageType string `json:"image_type" yaml:"image_type"`
-}
-
-// ImageAliasesPost represents a new LXD image alias
-type ImageAliasesPost struct {
- ImageAliasesEntry `yaml:",inline"`
-}
-
-// ImageAliasesEntryPost represents the required fields to rename a LXD image alias
-type ImageAliasesEntryPost struct {
- Name string `json:"name" yaml:"name"`
-}
-
-// ImageAliasesEntryPut represents the modifiable fields of a LXD image alias
-type ImageAliasesEntryPut struct {
- Description string `json:"description" yaml:"description"`
- Target string `json:"target" yaml:"target"`
-}
-
-// ImageAliasesEntry represents a LXD image alias
-type ImageAliasesEntry struct {
- ImageAliasesEntryPut `yaml:",inline"`
-
- Name string `json:"name" yaml:"name"`
-
- // API extension: image_types
- Type string `json:"type" yaml:"type"`
-}
-
-// ImageMetadata represents LXD image metadata
-type ImageMetadata struct {
- Architecture string `json:"architecture" yaml:"architecture"`
- CreationDate int64 `json:"creation_date" yaml:"creation_date"`
- ExpiryDate int64 `json:"expiry_date" yaml:"expiry_date"`
- Properties map[string]string `json:"properties" yaml:"properties"`
- Templates map[string]*ImageMetadataTemplate `json:"templates" yaml:"templates"`
-}
-
-// ImageMetadataTemplate represents a template entry in image metadata
-type ImageMetadataTemplate struct {
- When []string `json:"when" yaml:"when"`
- CreateOnly bool `json:"create_only" yaml:"create_only"`
- Template string `json:"template" yaml:"template"`
- Properties map[string]string `json:"properties" yaml:"properties"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/instance.go b/vendor/github.com/lxc/lxd/shared/api/instance.go
deleted file mode 100644
index cec6b4a25803..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/instance.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package api
-
-import (
- "time"
-)
-
-// InstanceType represents the type of instance being returned or requested via the API.
-type InstanceType string
-
-// InstanceTypeAny defines the instance type value for requesting any instance type.
-const InstanceTypeAny = InstanceType("")
-
-// InstanceTypeContainer defines the instance type value for a container.
-const InstanceTypeContainer = InstanceType("container")
-
-// InstanceTypeVM defines the instance type value for a virtual-machine.
-const InstanceTypeVM = InstanceType("virtual-machine")
-
-// InstancesPost represents the fields available for a new LXD instance.
-//
-// API extension: instances
-type InstancesPost struct {
- InstancePut `yaml:",inline"`
-
- Name string `json:"name" yaml:"name"`
- Source InstanceSource `json:"source" yaml:"source"`
- InstanceType string `json:"instance_type" yaml:"instance_type"`
- Type InstanceType `json:"type" yaml:"type"`
-}
-
-// InstancePost represents the fields required to rename/move a LXD instance.
-//
-// API extension: instances
-type InstancePost struct {
- Name string `json:"name" yaml:"name"`
- Migration bool `json:"migration" yaml:"migration"`
- Live bool `json:"live" yaml:"live"`
- InstanceOnly bool `json:"instance_only" yaml:"instance_only"`
- ContainerOnly bool `json:"container_only" yaml:"container_only"` // Deprecated, use InstanceOnly.
- Target *InstancePostTarget `json:"target" yaml:"target"`
-}
-
-// InstancePostTarget represents the migration target host and operation.
-//
-// API extension: instances
-type InstancePostTarget struct {
- Certificate string `json:"certificate" yaml:"certificate"`
- Operation string `json:"operation,omitempty" yaml:"operation,omitempty"`
- Websockets map[string]string `json:"secrets,omitempty" yaml:"secrets,omitempty"`
-}
-
-// InstancePut represents the modifiable fields of a LXD instance.
-//
-// API extension: instances
-type InstancePut struct {
- Architecture string `json:"architecture" yaml:"architecture"`
- Config map[string]string `json:"config" yaml:"config"`
- Devices map[string]map[string]string `json:"devices" yaml:"devices"`
- Ephemeral bool `json:"ephemeral" yaml:"ephemeral"`
- Profiles []string `json:"profiles" yaml:"profiles"`
- Restore string `json:"restore,omitempty" yaml:"restore,omitempty"`
- Stateful bool `json:"stateful" yaml:"stateful"`
- Description string `json:"description" yaml:"description"`
-}
-
-// Instance represents a LXD instance.
-//
-// API extension: instances
-type Instance struct {
- InstancePut `yaml:",inline"`
-
- CreatedAt time.Time `json:"created_at" yaml:"created_at"`
- ExpandedConfig map[string]string `json:"expanded_config" yaml:"expanded_config"`
- ExpandedDevices map[string]map[string]string `json:"expanded_devices" yaml:"expanded_devices"`
- Name string `json:"name" yaml:"name"`
- Status string `json:"status" yaml:"status"`
- StatusCode StatusCode `json:"status_code" yaml:"status_code"`
- LastUsedAt time.Time `json:"last_used_at" yaml:"last_used_at"`
- Location string `json:"location" yaml:"location"`
- Type string `json:"type" yaml:"type"`
-}
-
-// InstanceFull is a combination of Instance, InstanceBackup, InstanceState and InstanceSnapshot.
-//
-// API extension: instances
-type InstanceFull struct {
- Instance `yaml:",inline"`
-
- Backups []InstanceBackup `json:"backups" yaml:"backups"`
- State *InstanceState `json:"state" yaml:"state"`
- Snapshots []InstanceSnapshot `json:"snapshots" yaml:"snapshots"`
-}
-
-// Writable converts a full Instance struct into an InstancePut struct (filters read-only fields).
-//
-// API extension: instances
-func (c *Instance) Writable() InstancePut {
- return c.InstancePut
-}
-
-// IsActive checks whether the instance state indicates the instance is active.
-//
-// API extension: instances
-func (c Instance) IsActive() bool {
- switch c.StatusCode {
- case Stopped:
- return false
- case Error:
- return false
- default:
- return true
- }
-}
-
-// InstanceSource represents the creation source for a new instance.
-//
-// API extension: instances
-type InstanceSource struct {
- Type string `json:"type" yaml:"type"`
- Certificate string `json:"certificate" yaml:"certificate"`
- Alias string `json:"alias,omitempty" yaml:"alias,omitempty"`
- Fingerprint string `json:"fingerprint,omitempty" yaml:"fingerprint,omitempty"`
- Properties map[string]string `json:"properties,omitempty" yaml:"properties,omitempty"`
- Server string `json:"server,omitempty" yaml:"server,omitempty"`
- Secret string `json:"secret,omitempty" yaml:"secret,omitempty"`
- Protocol string `json:"protocol,omitempty" yaml:"protocol,omitempty"`
- BaseImage string `json:"base-image,omitempty" yaml:"base-image,omitempty"`
- Mode string `json:"mode,omitempty" yaml:"mode,omitempty"`
- Operation string `json:"operation,omitempty" yaml:"operation,omitempty"`
- Websockets map[string]string `json:"secrets,omitempty" yaml:"secrets,omitempty"`
- Source string `json:"source,omitempty" yaml:"source,omitempty"`
- Live bool `json:"live,omitempty" yaml:"live,omitempty"`
- InstanceOnly bool `json:"instance_only,omitempty" yaml:"instance_only,omitempty"`
- ContainerOnly bool `json:"container_only,omitempty" yaml:"container_only,omitempty"` // Deprecated, use InstanceOnly.
- Refresh bool `json:"refresh,omitempty" yaml:"refresh,omitempty"`
- Project string `json:"project,omitempty" yaml:"project,omitempty"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/instance_backup.go b/vendor/github.com/lxc/lxd/shared/api/instance_backup.go
deleted file mode 100644
index 093c0cd96769..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/instance_backup.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package api
-
-import "time"
-
-// InstanceBackupsPost represents the fields available for a new LXD instance backup.
-//
-// API extension: instances
-type InstanceBackupsPost struct {
- Name string `json:"name" yaml:"name"`
- ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"`
- InstanceOnly bool `json:"instance_only" yaml:"instance_only"`
- ContainerOnly bool `json:"container_only" yaml:"container_only"` // Deprecated, use InstanceOnly.
- OptimizedStorage bool `json:"optimized_storage" yaml:"optimized_storage"`
-
- // API extension: backup_compression_algorithm
- CompressionAlgorithm string `json:"compression_algorithm" yaml:"compression_algorithm"`
-}
-
-// InstanceBackup represents a LXD instance backup.
-//
-// API extension: instances
-type InstanceBackup struct {
- Name string `json:"name" yaml:"name"`
- CreatedAt time.Time `json:"created_at" yaml:"created_at"`
- ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"`
- InstanceOnly bool `json:"instance_only" yaml:"instance_only"`
- ContainerOnly bool `json:"container_only" yaml:"container_only"` // Deprecated, use InstanceOnly.
- OptimizedStorage bool `json:"optimized_storage" yaml:"optimized_storage"`
-}
-
-// InstanceBackupPost represents the fields available for the renaming of an instance backup.
-//
-// API extension: instances
-type InstanceBackupPost struct {
- Name string `json:"name" yaml:"name"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/instance_console.go b/vendor/github.com/lxc/lxd/shared/api/instance_console.go
deleted file mode 100644
index 614beb12458a..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/instance_console.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package api
-
-// InstanceConsoleControl represents a message on the instance console "control" socket.
-//
-// API extension: instances
-type InstanceConsoleControl struct {
- Command string `json:"command" yaml:"command"`
- Args map[string]string `json:"args" yaml:"args"`
-}
-
-// InstanceConsolePost represents a LXD instance console request.
-//
-// API extension: instances
-type InstanceConsolePost struct {
- Width int `json:"width" yaml:"width"`
- Height int `json:"height" yaml:"height"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/instance_exec.go b/vendor/github.com/lxc/lxd/shared/api/instance_exec.go
deleted file mode 100644
index 4579b2c89de2..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/instance_exec.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package api
-
-// InstanceExecControl represents a message on the instance exec "control" socket.
-//
-// API extension: instances
-type InstanceExecControl struct {
- Command string `json:"command" yaml:"command"`
- Args map[string]string `json:"args" yaml:"args"`
- Signal int `json:"signal" yaml:"signal"`
-}
-
-// InstanceExecPost represents a LXD instance exec request.
-//
-// API extension: instances
-type InstanceExecPost struct {
- Command []string `json:"command" yaml:"command"`
- WaitForWS bool `json:"wait-for-websocket" yaml:"wait-for-websocket"`
- Interactive bool `json:"interactive" yaml:"interactive"`
- Environment map[string]string `json:"environment" yaml:"environment"`
- Width int `json:"width" yaml:"width"`
- Height int `json:"height" yaml:"height"`
- RecordOutput bool `json:"record-output" yaml:"record-output"`
- User uint32 `json:"user" yaml:"user"`
- Group uint32 `json:"group" yaml:"group"`
- Cwd string `json:"cwd" yaml:"cwd"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/instance_snapshot.go b/vendor/github.com/lxc/lxd/shared/api/instance_snapshot.go
deleted file mode 100644
index bdd93544b2ce..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/instance_snapshot.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package api
-
-import (
- "time"
-)
-
-// InstanceSnapshotsPost represents the fields available for a new LXD instance snapshot.
-//
-// API extension: instances
-type InstanceSnapshotsPost struct {
- Name string `json:"name" yaml:"name"`
- Stateful bool `json:"stateful" yaml:"stateful"`
-
- // API extension: snapshot_expiry_creation
- ExpiresAt *time.Time `json:"expires_at" yaml:"expires_at"`
-}
-
-// InstanceSnapshotPost represents the fields required to rename/move a LXD instance snapshot.
-//
-// API extension: instances
-type InstanceSnapshotPost struct {
- Name string `json:"name" yaml:"name"`
- Migration bool `json:"migration" yaml:"migration"`
- Target *InstancePostTarget `json:"target" yaml:"target"`
- Live bool `json:"live,omitempty" yaml:"live,omitempty"`
-}
-
-// InstanceSnapshotPut represents the modifiable fields of a LXD instance snapshot.
-//
-// API extension: instances
-type InstanceSnapshotPut struct {
- Architecture string `json:"architecture" yaml:"architecture"`
- Config map[string]string `json:"config" yaml:"config"`
- Devices map[string]map[string]string `json:"devices" yaml:"devices"`
- Ephemeral bool `json:"ephemeral" yaml:"ephemeral"`
- Profiles []string `json:"profiles" yaml:"profiles"`
- ExpiresAt time.Time `json:"expires_at" yaml:"expires_at"`
-}
-
-// InstanceSnapshot represents a LXD instance snapshot.
-//
-// API extension: instances
-type InstanceSnapshot struct {
- InstanceSnapshotPut `yaml:",inline"`
-
- CreatedAt time.Time `json:"created_at" yaml:"created_at"`
- ExpandedConfig map[string]string `json:"expanded_config" yaml:"expanded_config"`
- ExpandedDevices map[string]map[string]string `json:"expanded_devices" yaml:"expanded_devices"`
- LastUsedAt time.Time `json:"last_used_at" yaml:"last_used_at"`
- Name string `json:"name" yaml:"name"`
- Stateful bool `json:"stateful" yaml:"stateful"`
-}
-
-// Writable converts a full InstanceSnapshot struct into an InstanceSnapshotPut struct
-// (filters read-only fields).
-//
-// API extension: instances
-func (c *InstanceSnapshot) Writable() InstanceSnapshotPut {
- return c.InstanceSnapshotPut
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/instance_state.go b/vendor/github.com/lxc/lxd/shared/api/instance_state.go
deleted file mode 100644
index cd7823cbac17..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/instance_state.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package api
-
-// InstanceStatePut represents the modifiable fields of a LXD instance's state.
-//
-// API extension: instances
-type InstanceStatePut struct {
- Action string `json:"action" yaml:"action"`
- Timeout int `json:"timeout" yaml:"timeout"`
- Force bool `json:"force" yaml:"force"`
- Stateful bool `json:"stateful" yaml:"stateful"`
-}
-
-// InstanceState represents a LXD instance's state.
-//
-// API extension: instances
-type InstanceState struct {
- Status string `json:"status" yaml:"status"`
- StatusCode StatusCode `json:"status_code" yaml:"status_code"`
- Disk map[string]InstanceStateDisk `json:"disk" yaml:"disk"`
- Memory InstanceStateMemory `json:"memory" yaml:"memory"`
- Network map[string]InstanceStateNetwork `json:"network" yaml:"network"`
- Pid int64 `json:"pid" yaml:"pid"`
- Processes int64 `json:"processes" yaml:"processes"`
- CPU InstanceStateCPU `json:"cpu" yaml:"cpu"`
-}
-
-// InstanceStateDisk represents the disk information section of a LXD instance's state.
-//
-// API extension: instances
-type InstanceStateDisk struct {
- Usage int64 `json:"usage" yaml:"usage"`
-}
-
-// InstanceStateCPU represents the cpu information section of a LXD instance's state.
-//
-// API extension: instances
-type InstanceStateCPU struct {
- Usage int64 `json:"usage" yaml:"usage"`
-}
-
-// InstanceStateMemory represents the memory information section of a LXD instance's state.
-//
-// API extension: instances
-type InstanceStateMemory struct {
- Usage int64 `json:"usage" yaml:"usage"`
- UsagePeak int64 `json:"usage_peak" yaml:"usage_peak"`
- SwapUsage int64 `json:"swap_usage" yaml:"swap_usage"`
- SwapUsagePeak int64 `json:"swap_usage_peak" yaml:"swap_usage_peak"`
-}
-
-// InstanceStateNetwork represents the network information section of a LXD instance's state.
-//
-// API extension: instances
-type InstanceStateNetwork struct {
- Addresses []InstanceStateNetworkAddress `json:"addresses" yaml:"addresses"`
- Counters InstanceStateNetworkCounters `json:"counters" yaml:"counters"`
- Hwaddr string `json:"hwaddr" yaml:"hwaddr"`
- HostName string `json:"host_name" yaml:"host_name"`
- Mtu int `json:"mtu" yaml:"mtu"`
- State string `json:"state" yaml:"state"`
- Type string `json:"type" yaml:"type"`
-}
-
-// InstanceStateNetworkAddress represents a network address as part of the network section of a LXD
-// instance's state.
-//
-// API extension: instances
-type InstanceStateNetworkAddress struct {
- Family string `json:"family" yaml:"family"`
- Address string `json:"address" yaml:"address"`
- Netmask string `json:"netmask" yaml:"netmask"`
- Scope string `json:"scope" yaml:"scope"`
-}
-
-// InstanceStateNetworkCounters represents packet counters as part of the network section of a LXD
-// instance's state.
-//
-// API extension: instances
-type InstanceStateNetworkCounters struct {
- BytesReceived int64 `json:"bytes_received" yaml:"bytes_received"`
- BytesSent int64 `json:"bytes_sent" yaml:"bytes_sent"`
- PacketsReceived int64 `json:"packets_received" yaml:"packets_received"`
- PacketsSent int64 `json:"packets_sent" yaml:"packets_sent"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/network.go b/vendor/github.com/lxc/lxd/shared/api/network.go
deleted file mode 100644
index 00478d26d768..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/network.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package api
-
-// NetworksPost represents the fields of a new LXD network
-//
-// API extension: network
-type NetworksPost struct {
- NetworkPut `yaml:",inline"`
-
- Managed bool `json:"managed" yaml:"managed"`
- Name string `json:"name" yaml:"name"`
- Type string `json:"type" yaml:"type"`
-}
-
-// NetworkPost represents the fields required to rename a LXD network
-//
-// API extension: network
-type NetworkPost struct {
- Name string `json:"name" yaml:"name"`
-}
-
-// NetworkPut represents the modifiable fields of a LXD network
-//
-// API extension: network
-type NetworkPut struct {
- Config map[string]string `json:"config" yaml:"config"`
-
- // API extension: entity_description
- Description string `json:"description" yaml:"description"`
-}
-
-// Network represents a LXD network
-type Network struct {
- NetworkPut `yaml:",inline"`
-
- Name string `json:"name" yaml:"name"`
- Type string `json:"type" yaml:"type"`
- UsedBy []string `json:"used_by" yaml:"used_by"`
-
- // API extension: network
- Managed bool `json:"managed" yaml:"managed"`
-
- // API extension: clustering
- Status string `json:"status" yaml:"status"`
- Locations []string `json:"locations" yaml:"locations"`
-}
-
-// Writable converts a full Network struct into a NetworkPut struct (filters read-only fields)
-func (network *Network) Writable() NetworkPut {
- return network.NetworkPut
-}
-
-// NetworkLease represents a DHCP lease
-//
-// API extension: network_leases
-type NetworkLease struct {
- Hostname string `json:"hostname" yaml:"hostname"`
- Hwaddr string `json:"hwaddr" yaml:"hwaddr"`
- Address string `json:"address" yaml:"address"`
- Type string `json:"type" yaml:"type"`
-
- // API extension: network_leases_location
- Location string `json:"location" yaml:"location"`
-}
-
-// NetworkState represents the network state
-type NetworkState struct {
- Addresses []NetworkStateAddress `json:"addresses" yaml:"addresses"`
- Counters NetworkStateCounters `json:"counters" yaml:"counters"`
- Hwaddr string `json:"hwaddr" yaml:"hwaddr"`
- Mtu int `json:"mtu" yaml:"mtu"`
- State string `json:"state" yaml:"state"`
- Type string `json:"type" yaml:"type"`
-}
-
-// NetworkStateAddress represents a network address
-type NetworkStateAddress struct {
- Family string `json:"family" yaml:"family"`
- Address string `json:"address" yaml:"address"`
- Netmask string `json:"netmask" yaml:"netmask"`
- Scope string `json:"scope" yaml:"scope"`
-}
-
-// NetworkStateCounters represents packet counters
-type NetworkStateCounters struct {
- BytesReceived int64 `json:"bytes_received" yaml:"bytes_received"`
- BytesSent int64 `json:"bytes_sent" yaml:"bytes_sent"`
- PacketsReceived int64 `json:"packets_received" yaml:"packets_received"`
- PacketsSent int64 `json:"packets_sent" yaml:"packets_sent"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/operation.go b/vendor/github.com/lxc/lxd/shared/api/operation.go
deleted file mode 100644
index 98774b50d8f0..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/operation.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package api
-
-import (
- "time"
-)
-
-// Operation represents a LXD background operation
-type Operation struct {
- ID string `json:"id" yaml:"id"`
- Class string `json:"class" yaml:"class"`
- Description string `json:"description" yaml:"description"`
- CreatedAt time.Time `json:"created_at" yaml:"created_at"`
- UpdatedAt time.Time `json:"updated_at" yaml:"updated_at"`
- Status string `json:"status" yaml:"status"`
- StatusCode StatusCode `json:"status_code" yaml:"status_code"`
- Resources map[string][]string `json:"resources" yaml:"resources"`
- Metadata map[string]interface{} `json:"metadata" yaml:"metadata"`
- MayCancel bool `json:"may_cancel" yaml:"may_cancel"`
- Err string `json:"err" yaml:"err"`
-
- // API extension: operation_location
- Location string `json:"location" yaml:"location"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/profile.go b/vendor/github.com/lxc/lxd/shared/api/profile.go
deleted file mode 100644
index 3cc7a64280e8..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/profile.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package api
-
-// ProfilesPost represents the fields of a new LXD profile
-type ProfilesPost struct {
- ProfilePut `yaml:",inline"`
-
- Name string `json:"name" yaml:"name" db:"primary=yes"`
-}
-
-// ProfilePost represents the fields required to rename a LXD profile
-type ProfilePost struct {
- Name string `json:"name" yaml:"name"`
-}
-
-// ProfilePut represents the modifiable fields of a LXD profile
-type ProfilePut struct {
- Config map[string]string `json:"config" yaml:"config"`
- Description string `json:"description" yaml:"description"`
- Devices map[string]map[string]string `json:"devices" yaml:"devices"`
-}
-
-// Profile represents a LXD profile
-type Profile struct {
- ProfilePut `yaml:",inline"`
-
- Name string `json:"name" yaml:"name" db:"primary=yes"`
-
- // API extension: profile_usedby
- UsedBy []string `json:"used_by" yaml:"used_by"`
-}
-
-// Writable converts a full Profile struct into a ProfilePut struct (filters read-only fields)
-func (profile *Profile) Writable() ProfilePut {
- return profile.ProfilePut
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/project.go b/vendor/github.com/lxc/lxd/shared/api/project.go
deleted file mode 100644
index 376e7dc1398b..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/project.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package api
-
-// ProjectsPost represents the fields of a new LXD project
-//
-// API extension: projects
-type ProjectsPost struct {
- ProjectPut `yaml:",inline"`
-
- Name string `json:"name" yaml:"name"`
-}
-
-// ProjectPost represents the fields required to rename a LXD project
-//
-// API extension: projects
-type ProjectPost struct {
- Name string `json:"name" yaml:"name"`
-}
-
-// ProjectPut represents the modifiable fields of a LXD project
-//
-// API extension: projects
-type ProjectPut struct {
- Description string `json:"description" yaml:"description"`
- Config map[string]string `json:"config" yaml:"config"`
-}
-
-// Project represents a LXD project
-//
-// API extension: projects
-type Project struct {
- ProjectPut `yaml:",inline"`
-
- Name string `json:"name" yaml:"name"`
- UsedBy []string `json:"used_by" yaml:"used_by"`
-}
-
-// Writable converts a full Project struct into a ProjectPut struct (filters read-only fields)
-//
-// API extension: projects
-func (project *Project) Writable() ProjectPut {
- return project.ProjectPut
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/resource.go b/vendor/github.com/lxc/lxd/shared/api/resource.go
deleted file mode 100644
index f2a9619d156a..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/resource.go
+++ /dev/null
@@ -1,293 +0,0 @@
-package api
-
-// Resources represents the system resources available for LXD
-// API extension: resources
-type Resources struct {
- CPU ResourcesCPU `json:"cpu" yaml:"cpu"`
- Memory ResourcesMemory `json:"memory" yaml:"memory"`
-
- // API extension: resources_gpu
- GPU ResourcesGPU `json:"gpu" yaml:"gpu"`
-
- // API extension: resources_v2
- Network ResourcesNetwork `json:"network" yaml:"network"`
- Storage ResourcesStorage `json:"storage" yaml:"storage"`
-}
-
-// ResourcesCPU represents the cpu resources available on the system
-// API extension: resources
-type ResourcesCPU struct {
- // API extension: resources_v2
- Architecture string `json:"architecture" yaml:"architecture"`
-
- Sockets []ResourcesCPUSocket `json:"sockets" yaml:"sockets"`
- Total uint64 `json:"total" yaml:"total"`
-}
-
-// ResourcesCPUSocket represents a CPU socket on the system
-// API extension: resources_v2
-type ResourcesCPUSocket struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- Vendor string `json:"vendor,omitempty" yaml:"vendor,omitempty"`
-
- Socket uint64 `json:"socket" yaml:"socket"`
- Cache []ResourcesCPUCache `json:"cache,omitempty" yaml:"cache,omitempty"`
- Cores []ResourcesCPUCore `json:"cores" yaml:"cores"`
-
- Frequency uint64 `json:"frequency,omitempty" yaml:"frequency,omitempty"`
- FrequencyMinimum uint64 `json:"frequency_minimum,omitempty" yaml:"frequency_minimum,omitempty"`
- FrequencyTurbo uint64 `json:"frequency_turbo,omitempty" yaml:"frequency_turbo,omitempty"`
-}
-
-// ResourcesCPUCache represents a CPU cache
-// API extension: resources_v2
-type ResourcesCPUCache struct {
- Level uint64 `json:"level" yaml:"level"`
- Type string `json:"type" yaml:"type"`
- Size uint64 `json:"size" yaml:"size"`
-}
-
-// ResourcesCPUCore represents a CPU core on the system
-// API extension: resources_v2
-type ResourcesCPUCore struct {
- Core uint64 `json:"core" yaml:"core"`
- NUMANode uint64 `json:"numa_node" yaml:"numa_node"`
-
- Threads []ResourcesCPUThread `json:"threads" yaml:"threads"`
-
- Frequency uint64 `json:"frequency,omitempty" yaml:"frequency,omitempty"`
-}
-
-// ResourcesCPUThread represents a CPU thread on the system
-// API extension: resources_v2
-type ResourcesCPUThread struct {
- ID int64 `json:"id" yaml:"id"`
- Thread uint64 `json:"thread" yaml:"thread"`
- Online bool `json:"online" yaml:"online"`
-}
-
-// ResourcesGPU represents the GPU resources available on the system
-// API extension: resources_gpu
-type ResourcesGPU struct {
- Cards []ResourcesGPUCard `json:"cards" yaml:"cards"`
- Total uint64 `json:"total" yaml:"total"`
-}
-
-// ResourcesGPUCard represents a GPU card on the system
-// API extension: resources_v2
-type ResourcesGPUCard struct {
- Driver string `json:"driver,omitempty" yaml:"driver,omitempty"`
- DriverVersion string `json:"driver_version,omitempty" yaml:"driver_version,omitempty"`
-
- DRM *ResourcesGPUCardDRM `json:"drm,omitempty" yaml:"drm,omitempty"`
- SRIOV *ResourcesGPUCardSRIOV `json:"sriov,omitempty" yaml:"sriov,omitempty"`
- Nvidia *ResourcesGPUCardNvidia `json:"nvidia,omitempty" yaml:"nvidia,omitempty"`
-
- NUMANode uint64 `json:"numa_node" yaml:"numa_node"`
- PCIAddress string `json:"pci_address,omitempty" yaml:"pci_address,omitempty"`
-
- Vendor string `json:"vendor,omitempty" yaml:"vendor,omitempty"`
- VendorID string `json:"vendor_id,omitempty" yaml:"vendor_id,omitempty"`
- Product string `json:"product,omitempty" yaml:"product,omitempty"`
- ProductID string `json:"product_id,omitempty" yaml:"product_id,omitempty"`
-}
-
-// ResourcesGPUCardDRM represents the Linux DRM configuration of the GPU
-// API extension: resources_v2
-type ResourcesGPUCardDRM struct {
- ID uint64 `json:"id" yaml:"id"`
-
- CardName string `json:"card_name" yaml:"card_name"`
- CardDevice string `json:"card_device" yaml:"card_device"`
-
- ControlName string `json:"control_name,omitempty" yaml:"control_name,omitempty"`
- ControlDevice string `json:"control_device,omitempty" yaml:"control_device,omitempty"`
-
- RenderName string `json:"render_name,omitempty" yaml:"render_name,omitempty"`
- RenderDevice string `json:"render_device,omitempty" yaml:"render_device,omitempty"`
-}
-
-// ResourcesGPUCardSRIOV represents the SRIOV configuration of the GPU
-// API extension: resources_v2
-type ResourcesGPUCardSRIOV struct {
- CurrentVFs uint64 `json:"current_vfs" yaml:"current_vfs"`
- MaximumVFs uint64 `json:"maximum_vfs" yaml:"maximum_vfs"`
-
- VFs []ResourcesGPUCard `json:"vfs" yaml:"vfs"`
-}
-
-// ResourcesGPUCardNvidia represents additional information for NVIDIA GPUs
-// API extension: resources_gpu
-type ResourcesGPUCardNvidia struct {
- CUDAVersion string `json:"cuda_version,omitempty" yaml:"cuda_version,omitempty"`
- NVRMVersion string `json:"nvrm_version,omitempty" yaml:"nvrm_version,omitempty"`
-
- Brand string `json:"brand" yaml:"brand"`
- Model string `json:"model" yaml:"model"`
- UUID string `json:"uuid,omitempty" yaml:"uuid,omitempty"`
- Architecture string `json:"architecture,omitempty" yaml:"architecture,omitempty"`
-
- // API extension: resources_v2
- CardName string `json:"card_name" yaml:"card_name"`
- CardDevice string `json:"card_device" yaml:"card_device"`
-}
-
-// ResourcesNetwork represents the network cards available on the system
-// API extension: resources_v2
-type ResourcesNetwork struct {
- Cards []ResourcesNetworkCard `json:"cards" yaml:"cards"`
- Total uint64 `json:"total" yaml:"total"`
-}
-
-// ResourcesNetworkCard represents a network card on the system
-// API extension: resources_v2
-type ResourcesNetworkCard struct {
- Driver string `json:"driver,omitempty" yaml:"driver,omitempty"`
- DriverVersion string `json:"driver_version,omitempty" yaml:"driver_version,omitempty"`
-
- Ports []ResourcesNetworkCardPort `json:"ports,omitempty" yaml:"ports,omitempty"`
- SRIOV *ResourcesNetworkCardSRIOV `json:"sriov,omitempty" yaml:"sriov,omitempty"`
-
- NUMANode uint64 `json:"numa_node" yaml:"numa_node"`
- PCIAddress string `json:"pci_address,omitempty" yaml:"pci_address,omitempty"`
-
- Vendor string `json:"vendor,omitempty" yaml:"vendor,omitempty"`
- VendorID string `json:"vendor_id,omitempty" yaml:"vendor_id,omitempty"`
- Product string `json:"product,omitempty" yaml:"product,omitempty"`
- ProductID string `json:"product_id,omitempty" yaml:"product_id,omitempty"`
-
- // API extension: resources_network_firmware
- FirmwareVersion string `json:"firmware_version,omitempty" yaml:"firmware_version,omitempty"`
-}
-
-// ResourcesNetworkCardPort represents a network port on the system
-// API extension: resources_v2
-type ResourcesNetworkCardPort struct {
- ID string `json:"id" yaml:"id"`
- Address string `json:"address,omitempty" yaml:"address,omitempty"`
- Port uint64 `json:"port" yaml:"port"`
- Protocol string `json:"protocol" yaml:"protocol"`
-
- SupportedModes []string `json:"supported_modes,omitempty" yaml:"supported_modes,omitempty"`
- SupportedPorts []string `json:"supported_ports,omitempty" yaml:"supported_ports,omitempty"`
-
- PortType string `json:"port_type,omitempty" yaml:"port_type,omitempty"`
- TransceiverType string `json:"transceiver_type,omitempty" yaml:"transceiver_type,omitempty"`
-
- AutoNegotiation bool `json:"auto_negotiation" yaml:"auto_negotiation"`
- LinkDetected bool `json:"link_detected" yaml:"link_detected"`
- LinkSpeed uint64 `json:"link_speed,omitempty" yaml:"link_speed,omitempty"`
- LinkDuplex string `json:"link_duplex,omitempty" yaml:"link_duplex,omitempty"`
-
- // API extension: resources_infiniband
- Infiniband *ResourcesNetworkCardPortInfiniband `json:"infiniband,omitempty" yaml:"infiniband,omitempty"`
-}
-
-// ResourcesNetworkCardPortInfiniband represents the Linux Infiniband configuration for the port
-// API extension: resources_infiniband
-type ResourcesNetworkCardPortInfiniband struct {
- IsSMName string `json:"issm_name,omitempty" yaml:"issm_name,omitempty"`
- IsSMDevice string `json:"issm_device,omitempty" yaml:"issm_device,omitempty"`
-
- MADName string `json:"mad_name,omitempty" yaml:"mad_name,omitempty"`
- MADDevice string `json:"mad_device,omitempty" yaml:"mad_device,omitempty"`
-
- VerbName string `json:"verb_name,omitempty" yaml:"verb_name,omitempty"`
- VerbDevice string `json:"verb_device,omitempty" yaml:"verb_device,omitempty"`
-}
-
-// ResourcesNetworkCardSRIOV represents the SRIOV configuration of the network card
-// API extension: resources_v2
-type ResourcesNetworkCardSRIOV struct {
- CurrentVFs uint64 `json:"current_vfs" yaml:"current_vfs"`
- MaximumVFs uint64 `json:"maximum_vfs" yaml:"maximum_vfs"`
-
- VFs []ResourcesNetworkCard `json:"vfs" yaml:"vfs"`
-}
-
-// ResourcesStorage represents the local storage
-// API extension: resources_v2
-type ResourcesStorage struct {
- Disks []ResourcesStorageDisk `json:"disks" yaml:"disks"`
- Total uint64 `json:"total" yaml:"total"`
-}
-
-// ResourcesStorageDisk represents a disk
-// API extension: resources_v2
-type ResourcesStorageDisk struct {
- ID string `json:"id" yaml:"id"`
- Device string `json:"device" yaml:"device"`
- Model string `json:"model,omitempty" yaml:"model,omitempty"`
- Type string `json:"type,omitempty" yaml:"type,omitempty"`
- ReadOnly bool `json:"read_only" yaml:"read_only"`
- Size uint64 `json:"size" yaml:"size"`
-
- Removable bool `json:"removable" yaml:"removable"`
- WWN string `json:"wwn,omitempty" yaml:"wwn,omitempty"`
- NUMANode uint64 `json:"numa_node" yaml:"numa_node"`
-
- // API extension: resources_disk_sata
- DevicePath string `json:"device_path" yaml:"device_path"`
- BlockSize uint64 `json:"block_size" yaml:"block_size"`
- FirmwareVersion string `json:"firmware_version,omitempty" yaml:"firmware_version,omitempty"`
- RPM uint64 `json:"rpm" yaml:"rpm"`
- Serial string `json:"serial,omitempty" yaml:"serial,omitempty"`
-
- Partitions []ResourcesStorageDiskPartition `json:"partitions" yaml:"partitions"`
-}
-
-// ResourcesStorageDiskPartition represents a partition on a disk
-// API extension: resources_v2
-type ResourcesStorageDiskPartition struct {
- ID string `json:"id" yaml:"id"`
- Device string `json:"device" yaml:"device"`
- ReadOnly bool `json:"read_only" yaml:"read_only"`
- Size uint64 `json:"size" yaml:"size"`
-
- Partition uint64 `json:"partition" yaml:"partition"`
-}
-
-// ResourcesMemory represents the memory resources available on the system
-// API extension: resources
-type ResourcesMemory struct {
- // API extension: resources_v2
- Nodes []ResourcesMemoryNode `json:"nodes,omitempty" yaml:"nodes,omitempty"`
- HugepagesTotal uint64 `json:"hugepages_total" yaml:"hugepages_total"`
- HugepagesUsed uint64 `json:"hugepages_used" yaml:"hugepages_used"`
- HugepagesSize uint64 `json:"hugepages_size" yaml:"hugepages_size"`
-
- Used uint64 `json:"used" yaml:"used"`
- Total uint64 `json:"total" yaml:"total"`
-}
-
-// ResourcesMemoryNode represents the node-specific memory resources available on the system
-// API extension: resources_v2
-type ResourcesMemoryNode struct {
- NUMANode uint64 `json:"numa_node" yaml:"numa_node"`
- HugepagesUsed uint64 `json:"hugepages_used" yaml:"hugepages_used"`
- HugepagesTotal uint64 `json:"hugepages_total" yaml:"hugepages_total"`
-
- Used uint64 `json:"used" yaml:"used"`
- Total uint64 `json:"total" yaml:"total"`
-}
-
-// ResourcesStoragePool represents the resources available to a given storage pool
-// API extension: resources
-type ResourcesStoragePool struct {
- Space ResourcesStoragePoolSpace `json:"space,omitempty" yaml:"space,omitempty"`
- Inodes ResourcesStoragePoolInodes `json:"inodes,omitempty" yaml:"inodes,omitempty"`
-}
-
-// ResourcesStoragePoolSpace represents the space available to a given storage pool
-// API extension: resources
-type ResourcesStoragePoolSpace struct {
- Used uint64 `json:"used,omitempty" yaml:"used,omitempty"`
- Total uint64 `json:"total" yaml:"total"`
-}
-
-// ResourcesStoragePoolInodes represents the inodes available to a given storage pool
-// API extension: resources
-type ResourcesStoragePoolInodes struct {
- Used uint64 `json:"used" yaml:"used"`
- Total uint64 `json:"total" yaml:"total"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/response.go b/vendor/github.com/lxc/lxd/shared/api/response.go
deleted file mode 100644
index 4f4e04497714..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/response.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package api
-
-import (
- "encoding/json"
-)
-
-// ResponseRaw represents a LXD operation in its original form
-type ResponseRaw struct {
- Type ResponseType `json:"type" yaml:"type"`
-
- // Valid only for Sync responses
- Status string `json:"status" yaml:"status"`
- StatusCode int `json:"status_code" yaml:"status_code"`
-
- // Valid only for Async responses
- Operation string `json:"operation" yaml:"operation"`
-
- // Valid only for Error responses
- Code int `json:"error_code" yaml:"error_code"`
- Error string `json:"error" yaml:"error"`
-
- Metadata interface{} `json:"metadata" yaml:"metadata"`
-}
-
-// Response represents a LXD operation
-type Response struct {
- Type ResponseType `json:"type" yaml:"type"`
-
- // Valid only for Sync responses
- Status string `json:"status" yaml:"status"`
- StatusCode int `json:"status_code" yaml:"status_code"`
-
- // Valid only for Async responses
- Operation string `json:"operation" yaml:"operation"`
-
- // Valid only for Error responses
- Code int `json:"error_code" yaml:"error_code"`
- Error string `json:"error" yaml:"error"`
-
- // Valid for Sync and Error responses
- Metadata json.RawMessage `json:"metadata" yaml:"metadata"`
-}
-
-// MetadataAsMap parses the Response metadata into a map
-func (r *Response) MetadataAsMap() (map[string]interface{}, error) {
- ret := map[string]interface{}{}
- err := r.MetadataAsStruct(&ret)
- if err != nil {
- return nil, err
- }
-
- return ret, nil
-}
-
-// MetadataAsOperation turns the Response metadata into an Operation
-func (r *Response) MetadataAsOperation() (*Operation, error) {
- op := Operation{}
- err := r.MetadataAsStruct(&op)
- if err != nil {
- return nil, err
- }
-
- return &op, nil
-}
-
-// MetadataAsStringSlice parses the Response metadata into a slice of string
-func (r *Response) MetadataAsStringSlice() ([]string, error) {
- sl := []string{}
- err := r.MetadataAsStruct(&sl)
- if err != nil {
- return nil, err
- }
-
- return sl, nil
-}
-
-// MetadataAsStruct parses the Response metadata into a provided struct
-func (r *Response) MetadataAsStruct(target interface{}) error {
- return json.Unmarshal(r.Metadata, &target)
-}
-
-// ResponseType represents a valid LXD response type
-type ResponseType string
-
-// LXD response types
-const (
- SyncResponse ResponseType = "sync"
- AsyncResponse ResponseType = "async"
- ErrorResponse ResponseType = "error"
-)
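
For context on what this hunk removes: clients decoded the daemon's wire envelope into Response, leaving Metadata as raw JSON until its concrete shape was known. A minimal sketch of that flow, assuming the github.com/lxc/lxd/shared/api package as it existed before this removal:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/lxc/lxd/shared/api"
    )

    func main() {
    	// A typical async envelope as returned by a LXD daemon.
    	raw := []byte(`{"type": "async", "status": "Operation created",
    		"status_code": 100, "operation": "/1.0/operations/abc",
    		"metadata": {"id": "abc", "status_code": 100}}`)

    	var resp api.Response
    	if err := json.Unmarshal(raw, &resp); err != nil {
    		panic(err)
    	}

    	// Metadata stays a json.RawMessage until the caller picks a decoder.
    	meta, err := resp.MetadataAsMap()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(resp.Type, resp.StatusCode, meta["id"])
    }

Keeping Metadata as json.RawMessage lets a single envelope type carry operations, string lists, or error details without a tagged union.
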
diff --git a/vendor/github.com/lxc/lxd/shared/api/server.go b/vendor/github.com/lxc/lxd/shared/api/server.go
deleted file mode 100644
index c40b44e410d3..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/server.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package api
-
-// ServerEnvironment represents the read-only environment fields of a LXD server
-type ServerEnvironment struct {
- Addresses []string `json:"addresses" yaml:"addresses"`
- Architectures []string `json:"architectures" yaml:"architectures"`
- Certificate string `json:"certificate" yaml:"certificate"`
- CertificateFingerprint string `json:"certificate_fingerprint" yaml:"certificate_fingerprint"`
- Driver string `json:"driver" yaml:"driver"`
- DriverVersion string `json:"driver_version" yaml:"driver_version"`
- Kernel string `json:"kernel" yaml:"kernel"`
- KernelArchitecture string `json:"kernel_architecture" yaml:"kernel_architecture"`
-
- // API extension: kernel_features
- KernelFeatures map[string]string `json:"kernel_features" yaml:"kernel_features"`
-
- KernelVersion string `json:"kernel_version" yaml:"kernel_version"`
-
- // API extension: lxc_features
- LXCFeatures map[string]string `json:"lxc_features" yaml:"lxc_features"`
-
- // API extension: projects
- Project string `json:"project" yaml:"project"`
-
- Server string `json:"server" yaml:"server"`
-
- // API extension: clustering
- ServerClustered bool `json:"server_clustered" yaml:"server_clustered"`
- ServerName string `json:"server_name" yaml:"server_name"`
-
- ServerPid int `json:"server_pid" yaml:"server_pid"`
- ServerVersion string `json:"server_version" yaml:"server_version"`
- Storage string `json:"storage" yaml:"storage"`
- StorageVersion string `json:"storage_version" yaml:"storage_version"`
-}
-
-// ServerPut represents the modifiable fields of a LXD server configuration
-type ServerPut struct {
- Config map[string]interface{} `json:"config" yaml:"config"`
-}
-
-// ServerUntrusted represents a LXD server for an untrusted client
-type ServerUntrusted struct {
- APIExtensions []string `json:"api_extensions" yaml:"api_extensions"`
- APIStatus string `json:"api_status" yaml:"api_status"`
- APIVersion string `json:"api_version" yaml:"api_version"`
- Auth string `json:"auth" yaml:"auth"`
- Public bool `json:"public" yaml:"public"`
-
- // API extension: macaroon_authentication
- AuthMethods []string `json:"auth_methods" yaml:"auth_methods"`
-}
-
-// Server represents a LXD server
-type Server struct {
- ServerPut `yaml:",inline"`
- ServerUntrusted `yaml:",inline"`
-
- Environment ServerEnvironment `json:"environment" yaml:"environment"`
-}
-
-// Writable converts a full Server struct into a ServerPut struct (filters read-only fields)
-func (srv *Server) Writable() ServerPut {
- return srv.ServerPut
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/status_code.go b/vendor/github.com/lxc/lxd/shared/api/status_code.go
deleted file mode 100644
index bf2986607b21..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/status_code.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package api
-
-// StatusCode represents a valid LXD operation and container status
-type StatusCode int
-
-// LXD status codes
-const (
- OperationCreated StatusCode = 100
- Started StatusCode = 101
- Stopped StatusCode = 102
- Running StatusCode = 103
- Cancelling StatusCode = 104
- Pending StatusCode = 105
- Starting StatusCode = 106
- Stopping StatusCode = 107
- Aborting StatusCode = 108
- Freezing StatusCode = 109
- Frozen StatusCode = 110
- Thawed StatusCode = 111
- Error StatusCode = 112
-
- Success StatusCode = 200
-
- Failure StatusCode = 400
- Cancelled StatusCode = 401
-)
-
-// String returns a suitable string representation for the status code
-func (o StatusCode) String() string {
- return map[StatusCode]string{
- OperationCreated: "Operation created",
- Started: "Started",
- Stopped: "Stopped",
- Running: "Running",
- Cancelling: "Cancelling",
- Pending: "Pending",
- Success: "Success",
- Failure: "Failure",
- Cancelled: "Cancelled",
- Starting: "Starting",
- Stopping: "Stopping",
- Aborting: "Aborting",
- Freezing: "Freezing",
- Frozen: "Frozen",
- Thawed: "Thawed",
- Error: "Error",
- }[o]
-}
-
-// IsFinal will return true if the status code indicates an end state
-func (o StatusCode) IsFinal() bool {
- return int(o) >= 200
-}
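
The code bands are deliberate: 1xx values are in-flight states, 2xx success, 4xx failure, which is why IsFinal reduces to a >= 200 comparison. A sketch of polling until an end state, assuming the pre-removal api package; waitFinal and the canned status sequence are illustrative, not part of LXD:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/lxc/lxd/shared/api"
    )

    // waitFinal is a hypothetical helper: it polls a status source until the
    // code reaches an end state (Success, Failure or Cancelled).
    func waitFinal(poll func() api.StatusCode) api.StatusCode {
    	for {
    		if code := poll(); code.IsFinal() {
    			return code
    		}
    		time.Sleep(100 * time.Millisecond)
    	}
    }

    func main() {
    	steps := []api.StatusCode{api.Running, api.Stopping, api.Success}
    	i := 0
    	poll := func() api.StatusCode {
    		code := steps[i]
    		if i < len(steps)-1 {
    			i++
    		}
    		return code
    	}
    	final := waitFinal(poll)
    	fmt.Println(int(final), final.String()) // 200 "Success"
    }
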
diff --git a/vendor/github.com/lxc/lxd/shared/api/storage_pool.go b/vendor/github.com/lxc/lxd/shared/api/storage_pool.go
deleted file mode 100644
index 16cf4fad3057..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/storage_pool.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package api
-
-// StoragePoolsPost represents the fields of a new LXD storage pool
-//
-// API extension: storage
-type StoragePoolsPost struct {
- StoragePoolPut `yaml:",inline"`
-
- Name string `json:"name" yaml:"name"`
- Driver string `json:"driver" yaml:"driver"`
-}
-
-// StoragePool represents the fields of a LXD storage pool.
-//
-// API extension: storage
-type StoragePool struct {
- StoragePoolPut `yaml:",inline"`
-
- Name string `json:"name" yaml:"name"`
- Driver string `json:"driver" yaml:"driver"`
- UsedBy []string `json:"used_by" yaml:"used_by"`
-
- // API extension: clustering
- Status string `json:"status" yaml:"status"`
- Locations []string `json:"locations" yaml:"locations"`
-}
-
-// StoragePoolPut represents the modifiable fields of a LXD storage pool.
-//
-// API extension: storage
-type StoragePoolPut struct {
- Config map[string]string `json:"config" yaml:"config"`
-
- // API extension: entity_description
- Description string `json:"description" yaml:"description"`
-}
-
-// Writable converts a full StoragePool struct into a StoragePoolPut struct
-// (filters read-only fields).
-func (storagePool *StoragePool) Writable() StoragePoolPut {
- return storagePool.StoragePoolPut
-}
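
The Put/Writable pairing seen here is the pattern used throughout these API types: GET returns the full object, and Writable() narrows it to the fields a PUT may change, so read-only data such as Name, Driver, and UsedBy is never echoed back. A short sketch under the same pre-removal import path:

    package main

    import (
    	"fmt"

    	"github.com/lxc/lxd/shared/api"
    )

    func main() {
    	// A full object as GET returns it, read-only fields populated.
    	pool := api.StoragePool{
    		StoragePoolPut: api.StoragePoolPut{
    			Config:      map[string]string{"size": "30GB"},
    			Description: "default pool",
    		},
    		Name:   "default",
    		Driver: "zfs",
    		UsedBy: []string{"/1.0/containers/c1"},
    	}

    	// Writable() keeps only Config and Description, the fields a PUT may change.
    	put := pool.Writable()
    	put.Config["size"] = "50GB"
    	fmt.Printf("%+v\n", put)
    }
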
diff --git a/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume.go b/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume.go
deleted file mode 100644
index db916ff0fac3..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package api
-
-// StorageVolumesPost represents the fields of a new LXD storage pool volume
-//
-// API extension: storage
-type StorageVolumesPost struct {
- StorageVolumePut `yaml:",inline"`
-
- Name string `json:"name" yaml:"name"`
- Type string `json:"type" yaml:"type"`
-
- // API extension: storage_api_local_volume_handling
- Source StorageVolumeSource `json:"source" yaml:"source"`
-}
-
-// StorageVolumePost represents the fields required to rename a LXD storage pool volume
-//
-// API extension: storage_api_volume_rename
-type StorageVolumePost struct {
- Name string `json:"name" yaml:"name"`
-
- // API extension: storage_api_local_volume_handling
- Pool string `json:"pool,omitempty" yaml:"pool,omitempty"`
-
- // API extension: storage_api_remote_volume_handling
- Migration bool `json:"migration" yaml:"migration"`
-
- // API extension: storage_api_remote_volume_handling
- Target *StorageVolumePostTarget `json:"target" yaml:"target"`
-
- // API extension: storage_api_remote_volume_snapshots
- VolumeOnly bool `json:"volume_only" yaml:"volume_only"`
-}
-
-// StorageVolumePostTarget represents the migration target host and operation
-//
-// API extension: storage_api_remote_volume_handling
-type StorageVolumePostTarget struct {
- Certificate string `json:"certificate" yaml:"certificate"`
- Operation string `json:"operation,omitempty" yaml:"operation,omitempty"`
- Websockets map[string]string `json:"secrets,omitempty" yaml:"secrets,omitempty"`
-}
-
-// StorageVolume represents the fields of a LXD storage volume.
-//
-// API extension: storage
-type StorageVolume struct {
- StorageVolumePut `yaml:",inline"`
- Name string `json:"name" yaml:"name"`
- Type string `json:"type" yaml:"type"`
- UsedBy []string `json:"used_by" yaml:"used_by"`
-
- // API extension: clustering
- Location string `json:"location" yaml:"location"`
-}
-
-// StorageVolumePut represents the modifiable fields of a LXD storage volume.
-//
-// API extension: storage
-type StorageVolumePut struct {
- Config map[string]string `json:"config" yaml:"config"`
-
- // API extension: entity_description
- Description string `json:"description" yaml:"description"`
-
- // API extension: storage_api_volume_snapshots
- Restore string `json:"restore,omitempty" yaml:"restore,omitempty"`
-}
-
-// StorageVolumeSource represents the creation source for a new storage volume.
-//
-// API extension: storage_api_local_volume_handling
-type StorageVolumeSource struct {
- Name string `json:"name" yaml:"name"`
- Type string `json:"type" yaml:"type"`
- Pool string `json:"pool" yaml:"pool"`
-
- // API extension: storage_api_remote_volume_handling
- Certificate string `json:"certificate" yaml:"certificate"`
- Mode string `json:"mode,omitempty" yaml:"mode,omitempty"`
- Operation string `json:"operation,omitempty" yaml:"operation,omitempty"`
- Websockets map[string]string `json:"secrets,omitempty" yaml:"secrets,omitempty"`
-
- // API extension: storage_api_volume_snapshots
- VolumeOnly bool `json:"volume_only" yaml:"volume_only"`
-}
-
-// Writable converts a full StorageVolume struct into a StorageVolumePut struct
-// (filters read-only fields).
-func (storageVolume *StorageVolume) Writable() StorageVolumePut {
- return storageVolume.StorageVolumePut
-}
diff --git a/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_snapshot.go b/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_snapshot.go
deleted file mode 100644
index 4ba21da05d2e..000000000000
--- a/vendor/github.com/lxc/lxd/shared/api/storage_pool_volume_snapshot.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package api
-
-// StorageVolumeSnapshotsPost represents the fields available for a new LXD storage volume snapshot
-//
-// API extension: storage_api_volume_snapshots
-type StorageVolumeSnapshotsPost struct {
- Name string `json:"name" yaml:"name"`
-}
-
-// StorageVolumeSnapshotPost represents the fields required to rename/move a LXD storage volume snapshot
-//
-// API extension: storage_api_volume_snapshots
-type StorageVolumeSnapshotPost struct {
- Name string `json:"name" yaml:"name"`
-}
-
-// StorageVolumeSnapshot represents a LXD storage volume snapshot
-//
-// API extension: storage_api_volume_snapshots
-type StorageVolumeSnapshot struct {
- Name string `json:"name" yaml:"name"`
- Config map[string]string `json:"config" yaml:"config"`
- Description string `json:"description" yaml:"description"`
-}
-
-// StorageVolumeSnapshotPut represents the modifiable fields of a LXD storage volume snapshot
-//
-// API extension: storage_api_volume_snapshots
-type StorageVolumeSnapshotPut struct {
- Description string `json:"description" yaml:"description"`
-}
diff --git a/vendor/github.com/lxc/lxd/shared/archive_linux.go b/vendor/github.com/lxc/lxd/shared/archive_linux.go
deleted file mode 100644
index a77a09b70a74..000000000000
--- a/vendor/github.com/lxc/lxd/shared/archive_linux.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package shared
-
-import (
- "bytes"
- "fmt"
- "io"
- "os"
- "strings"
-
- "golang.org/x/sys/unix"
-
- "github.com/lxc/lxd/shared/ioprogress"
- "github.com/lxc/lxd/shared/logger"
-)
-
-func DetectCompression(fname string) ([]string, string, []string, error) {
- f, err := os.Open(fname)
- if err != nil {
- return nil, "", nil, err
- }
- defer f.Close()
-
- return DetectCompressionFile(f)
-}
-
-func DetectCompressionFile(f io.ReadSeeker) ([]string, string, []string, error) {
- // read header parts to detect compression method
- // bz2 - 2 bytes, 'BZ' signature/magic number
- // gz - 2 bytes, 0x1f 0x8b
-	// lzma - 6 bytes, { [0x000, 0xE0], '7', 'z', 'X', 'Z', 0x00 }
-	// xz - 6 bytes, header format { 0xFD, '7', 'z', 'X', 'Z', 0x00 }
- // tar - 263 bytes, trying to get ustar from 257 - 262
- header := make([]byte, 263)
- _, err := f.Read(header)
- if err != nil {
- return nil, "", nil, err
- }
-
- switch {
- case bytes.Equal(header[0:2], []byte{'B', 'Z'}):
- return []string{"-jxf"}, ".tar.bz2", []string{"bzip2", "-d"}, nil
- case bytes.Equal(header[0:2], []byte{0x1f, 0x8b}):
- return []string{"-zxf"}, ".tar.gz", []string{"gzip", "-d"}, nil
- case (bytes.Equal(header[1:5], []byte{'7', 'z', 'X', 'Z'}) && header[0] == 0xFD):
- return []string{"-Jxf"}, ".tar.xz", []string{"xz", "-d"}, nil
- case (bytes.Equal(header[1:5], []byte{'7', 'z', 'X', 'Z'}) && header[0] != 0xFD):
- return []string{"--lzma", "-xf"}, ".tar.lzma", []string{"lzma", "-d"}, nil
- case bytes.Equal(header[0:3], []byte{0x5d, 0x00, 0x00}):
- return []string{"--lzma", "-xf"}, ".tar.lzma", []string{"lzma", "-d"}, nil
- case bytes.Equal(header[257:262], []byte{'u', 's', 't', 'a', 'r'}):
- return []string{"-xf"}, ".tar", []string{}, nil
- case bytes.Equal(header[0:4], []byte{'h', 's', 'q', 's'}):
- return []string{"-xf"}, ".squashfs",
- []string{"sqfs2tar", "--no-skip"}, nil
- default:
- return nil, "", nil, fmt.Errorf("Unsupported compression")
- }
-}
-
-func Unpack(file string, path string, blockBackend bool, runningInUserns bool, tracker *ioprogress.ProgressTracker) error {
- extractArgs, extension, _, err := DetectCompression(file)
- if err != nil {
- return err
- }
-
- command := ""
- args := []string{}
- var reader io.Reader
- if strings.HasPrefix(extension, ".tar") {
- command = "tar"
- if runningInUserns {
- args = append(args, "--wildcards")
- args = append(args, "--exclude=dev/*")
- args = append(args, "--exclude=./dev/*")
- args = append(args, "--exclude=rootfs/dev/*")
- args = append(args, "--exclude=rootfs/./dev/*")
- }
- args = append(args, "-C", path, "--numeric-owner", "--xattrs-include=*")
- args = append(args, extractArgs...)
- args = append(args, "-")
-
- f, err := os.Open(file)
- if err != nil {
- return err
- }
- defer f.Close()
-
- reader = f
-
- // Attach the ProgressTracker if supplied.
- if tracker != nil {
- fsinfo, err := f.Stat()
- if err != nil {
- return err
- }
-
- tracker.Length = fsinfo.Size()
- reader = &ioprogress.ProgressReader{
- ReadCloser: f,
- Tracker: tracker,
- }
- }
- } else if strings.HasPrefix(extension, ".squashfs") {
- // unsquashfs does not support reading from stdin,
- // so ProgressTracker is not possible.
- command = "unsquashfs"
- args = append(args, "-f", "-d", path, "-n")
-
- // Limit unsquashfs chunk size to 10% of memory and up to 256MB (default)
- // When running on a low memory system, also disable multi-processing
- mem, err := DeviceTotalMemory()
- mem = mem / 1024 / 1024 / 10
- if err == nil && mem < 256 {
- args = append(args, "-da", fmt.Sprintf("%d", mem), "-fr", fmt.Sprintf("%d", mem), "-p", "1")
- }
-
- args = append(args, file)
- } else {
- return fmt.Errorf("Unsupported image format: %s", extension)
- }
-
- err = RunCommandWithFds(reader, nil, command, args...)
- if err != nil {
- // Check if we ran out of space
- fs := unix.Statfs_t{}
-
- err1 := unix.Statfs(path, &fs)
- if err1 != nil {
- return err1
- }
-
- // Check if we're running out of space
- if int64(fs.Bfree) < int64(2*fs.Bsize) {
- if blockBackend {
-				return fmt.Errorf("Unable to unpack image, ran out of disk space (consider increasing your pool's volume.size)")
- } else {
-				return fmt.Errorf("Unable to unpack image, ran out of disk space")
- }
- }
-
- logger.Debugf("Unpacking failed")
- logger.Debugf(err.Error())
- return fmt.Errorf("Unpack failed, %s.", err)
- }
-
- return nil
-}
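
DetectCompression's 263-byte sniff covers the two- to six-byte compressor magics at the start of the file plus the "ustar" marker at offset 257. A minimal sketch of consuming its three return values, assuming the pre-removal shared package and a local rootfs.tar.xz:

    package main

    import (
    	"fmt"

    	"github.com/lxc/lxd/shared"
    )

    func main() {
    	// Sniffs the first 263 bytes of the file; errors if the file is
    	// missing or no known magic matches.
    	tarArgs, extension, unpacker, err := shared.DetectCompression("rootfs.tar.xz")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(extension) // ".tar.xz"
    	fmt.Println(tarArgs)   // [-Jxf], spliced into the tar command line by Unpack
    	fmt.Println(unpacker)  // [xz -d], a standalone decompressor invocation
    }
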
diff --git a/vendor/github.com/lxc/lxd/shared/cancel/canceler.go b/vendor/github.com/lxc/lxd/shared/cancel/canceler.go
deleted file mode 100644
index b3356cf37e5c..000000000000
--- a/vendor/github.com/lxc/lxd/shared/cancel/canceler.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package cancel
-
-import (
- "fmt"
- "net/http"
- "sync"
-)
-
-// Canceler tracks a cancelable operation
-type Canceler struct {
- reqChCancel map[*http.Request]chan struct{}
- lock sync.Mutex
-}
-
-// NewCanceler returns a new Canceler struct
-func NewCanceler() *Canceler {
- c := Canceler{}
-
- c.lock.Lock()
- c.reqChCancel = make(map[*http.Request]chan struct{})
- c.lock.Unlock()
-
- return &c
-}
-
-// Cancelable indicates whether there are operations that support cancelation
-func (c *Canceler) Cancelable() bool {
- c.lock.Lock()
- length := len(c.reqChCancel)
- c.lock.Unlock()
-
- return length > 0
-}
-
-// Cancel will attempt to cancel all ongoing operations
-func (c *Canceler) Cancel() error {
- if !c.Cancelable() {
- return fmt.Errorf("This operation can't be canceled at this time")
- }
-
- c.lock.Lock()
- for req, ch := range c.reqChCancel {
- close(ch)
- delete(c.reqChCancel, req)
- }
- c.lock.Unlock()
-
- return nil
-}
-
-// CancelableDownload performs an HTTP request and allows for it to be canceled at any time
-func CancelableDownload(c *Canceler, client *http.Client, req *http.Request) (*http.Response, chan bool, error) {
- chDone := make(chan bool)
- chCancel := make(chan struct{})
- if c != nil {
- c.lock.Lock()
- c.reqChCancel[req] = chCancel
- c.lock.Unlock()
- }
- req.Cancel = chCancel
-
- go func() {
- <-chDone
- if c != nil {
- c.lock.Lock()
- delete(c.reqChCancel, req)
- c.lock.Unlock()
- }
- }()
-
- resp, err := client.Do(req)
- return resp, chDone, err
-}
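
CancelableDownload keys a cancel channel to the request and relies on closing req.Cancel (the pre-context cancellation mechanism) to abort the transfer; the caller signals completion on chDone so the request is deregistered. A rough usage sketch with an illustrative URL:

    package main

    import (
    	"fmt"
    	"io"
    	"io/ioutil"
    	"net/http"
    	"time"

    	"github.com/lxc/lxd/shared/cancel"
    )

    func main() {
    	c := cancel.NewCanceler()
    	req, err := http.NewRequest("GET", "https://example.com/large-image", nil)
    	if err != nil {
    		panic(err)
    	}

    	resp, chDone, err := cancel.CancelableDownload(c, http.DefaultClient, req)
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()

    	// While the request is registered, any goroutine may abort it.
    	go func() {
    		time.Sleep(time.Second)
    		if c.Cancelable() {
    			fmt.Println("cancel:", c.Cancel())
    		}
    	}()

    	_, _ = io.Copy(ioutil.Discard, resp.Body)
    	close(chDone) // deregister the request from the canceler
    }
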
diff --git a/vendor/github.com/lxc/lxd/shared/cert.go b/vendor/github.com/lxc/lxd/shared/cert.go
deleted file mode 100644
index b38fa93a6784..000000000000
--- a/vendor/github.com/lxc/lxd/shared/cert.go
+++ /dev/null
@@ -1,531 +0,0 @@
-// http://golang.org/src/pkg/crypto/tls/generate_cert.go
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package shared
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha256"
- "crypto/tls"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/pem"
- "fmt"
- "io/ioutil"
- "math/big"
- "net"
- "net/http"
- "os"
- "os/user"
- "path"
- "path/filepath"
- "time"
-)
-
-// KeyPairAndCA returns a CertInfo object with a reference to the key pair and
-// (optionally) CA certificate located in the given directory and having the
-// given name prefix
-//
-// The naming convention for the various files is:
-//
-// .crt -> public key
-// .key -> private key
-// .ca -> CA certificate
-//
-// If no public/private key files are found, a new key pair will be generated
-// and saved on disk.
-//
-// If a CA certificate is found, it will be returned as the second return
-// value (otherwise it will be nil).
-func KeyPairAndCA(dir, prefix string, kind CertKind) (*CertInfo, error) {
- certFilename := filepath.Join(dir, prefix+".crt")
- keyFilename := filepath.Join(dir, prefix+".key")
-
- // Ensure that the certificate exists, or create a new one if it does
- // not.
- err := FindOrGenCert(certFilename, keyFilename, kind == CertClient)
- if err != nil {
- return nil, err
- }
-
- // Load the certificate.
- keypair, err := tls.LoadX509KeyPair(certFilename, keyFilename)
- if err != nil {
- return nil, err
- }
-
- // If available, load the CA data as well.
- caFilename := filepath.Join(dir, prefix+".ca")
- var ca *x509.Certificate
- if PathExists(caFilename) {
- ca, err = ReadCert(caFilename)
- if err != nil {
- return nil, err
- }
- }
-
- info := &CertInfo{
- keypair: keypair,
- ca: ca,
- }
- return info, nil
-}
-
-// CertInfo captures TLS certificate information about a certain public/private
-// keypair and an optional CA certificate.
-//
-// Given LXD's support for PKI setups, these two bits of information are
-// normally used and passed around together, so this structure helps with that
-// (see doc/security.md for more details).
-type CertInfo struct {
- keypair tls.Certificate
- ca *x509.Certificate
-}
-
-// KeyPair returns the public/private key pair.
-func (c *CertInfo) KeyPair() tls.Certificate {
- return c.keypair
-}
-
-// CA returns the CA certificate.
-func (c *CertInfo) CA() *x509.Certificate {
- return c.ca
-}
-
-// PublicKey is a convenience to encode the underlying public key to ASCII.
-func (c *CertInfo) PublicKey() []byte {
- data := c.KeyPair().Certificate[0]
- return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: data})
-}
-
-// PrivateKey is a convenience to encode the underlying private key.
-func (c *CertInfo) PrivateKey() []byte {
- ecKey, ok := c.KeyPair().PrivateKey.(*ecdsa.PrivateKey)
- if ok {
- data, err := x509.MarshalECPrivateKey(ecKey)
- if err != nil {
- return nil
- }
-
- return pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data})
- }
-
- rsaKey, ok := c.KeyPair().PrivateKey.(*rsa.PrivateKey)
- if ok {
- data := x509.MarshalPKCS1PrivateKey(rsaKey)
- return pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: data})
- }
-
- return nil
-}
-
-// Fingerprint returns the fingerprint of the public key.
-func (c *CertInfo) Fingerprint() string {
- fingerprint, err := CertFingerprintStr(string(c.PublicKey()))
- // Parsing should never fail, since we generated the cert ourselves,
- // but let's check the error for good measure.
- if err != nil {
- panic("invalid public key material")
- }
- return fingerprint
-}
-
-// CertKind defines the kind of certificate to generate from scratch in
-// KeyPairAndCA when it's not there.
-//
-// The two possible kinds are client and server, and they differ in the
-// ext-key-usage bitmaps. See GenerateMemCert for more details.
-type CertKind int
-
-// Possible kinds of certificates.
-const (
- CertClient CertKind = iota
- CertServer
-)
-
-// TestingKeyPair returns a CertInfo object initialized with a test keypair. It's
-// meant to be used only by tests.
-func TestingKeyPair() *CertInfo {
- keypair, err := tls.X509KeyPair(testCertPEMBlock, testKeyPEMBlock)
- if err != nil {
- panic(fmt.Sprintf("invalid X509 keypair material: %v", err))
- }
- cert := &CertInfo{
- keypair: keypair,
- }
- return cert
-}
-
-// TestingAltKeyPair returns a CertInfo object initialized with a test keypair
-// which differs from the one returned by TestCertInfo. It's meant to be used
-// only by tests.
-func TestingAltKeyPair() *CertInfo {
- keypair, err := tls.X509KeyPair(testAltCertPEMBlock, testAltKeyPEMBlock)
- if err != nil {
- panic(fmt.Sprintf("invalid X509 keypair material: %v", err))
- }
- cert := &CertInfo{
- keypair: keypair,
- }
- return cert
-}
-
-/*
- * Generate a list of names for which the certificate will be valid.
- * This will include the hostname and IP addresses.
- */
-func mynames() ([]string, error) {
- h, err := os.Hostname()
- if err != nil {
- return nil, err
- }
-
- ret := []string{h}
-
- ifs, err := net.Interfaces()
- if err != nil {
- return nil, err
- }
-
- for _, iface := range ifs {
- if IsLoopback(&iface) {
- continue
- }
-
- addrs, err := iface.Addrs()
- if err != nil {
- return nil, err
- }
-
- for _, addr := range addrs {
- ret = append(ret, addr.String())
- }
- }
-
- return ret, nil
-}
-
-func FindOrGenCert(certf string, keyf string, certtype bool) error {
- if PathExists(certf) && PathExists(keyf) {
- return nil
- }
-
- /* If neither stat succeeded, then this is our first run and we
- * need to generate cert and privkey */
- err := GenCert(certf, keyf, certtype)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// GenCert will create and populate a certificate file and a key file
-func GenCert(certf string, keyf string, certtype bool) error {
- /* Create the basenames if needed */
- dir := path.Dir(certf)
- err := os.MkdirAll(dir, 0750)
- if err != nil {
- return err
- }
- dir = path.Dir(keyf)
- err = os.MkdirAll(dir, 0750)
- if err != nil {
- return err
- }
-
- certBytes, keyBytes, err := GenerateMemCert(certtype)
- if err != nil {
- return err
- }
-
- certOut, err := os.Create(certf)
- if err != nil {
- return fmt.Errorf("Failed to open %s for writing: %v", certf, err)
- }
- certOut.Write(certBytes)
- certOut.Close()
-
- keyOut, err := os.OpenFile(keyf, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
- if err != nil {
- return fmt.Errorf("Failed to open %s for writing: %v", keyf, err)
- }
- keyOut.Write(keyBytes)
- keyOut.Close()
- return nil
-}
-
-// GenerateMemCert creates a client or server certificate and key pair,
-// returning them as byte slices in memory.
-func GenerateMemCert(client bool) ([]byte, []byte, error) {
- privk, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
- if err != nil {
- return nil, nil, fmt.Errorf("Failed to generate key: %v", err)
- }
-
- hosts, err := mynames()
- if err != nil {
- return nil, nil, fmt.Errorf("Failed to get my hostname: %v", err)
- }
-
- validFrom := time.Now()
- validTo := validFrom.Add(10 * 365 * 24 * time.Hour)
-
- serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
- serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
- if err != nil {
- return nil, nil, fmt.Errorf("Failed to generate serial number: %v", err)
- }
-
- userEntry, err := user.Current()
- var username string
- if err == nil {
- username = userEntry.Username
- if username == "" {
- username = "UNKNOWN"
- }
- } else {
- username = "UNKNOWN"
- }
-
- hostname, err := os.Hostname()
- if err != nil {
- hostname = "UNKNOWN"
- }
-
- template := x509.Certificate{
- SerialNumber: serialNumber,
- Subject: pkix.Name{
- Organization: []string{"linuxcontainers.org"},
- CommonName: fmt.Sprintf("%s@%s", username, hostname),
- },
- NotBefore: validFrom,
- NotAfter: validTo,
-
- KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
- BasicConstraintsValid: true,
- }
-
- if client {
- template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}
- } else {
- template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}
- }
-
- for _, h := range hosts {
- if ip, _, err := net.ParseCIDR(h); err == nil {
- if !ip.IsLinkLocalUnicast() && !ip.IsLinkLocalMulticast() {
- template.IPAddresses = append(template.IPAddresses, ip)
- }
- } else {
- template.DNSNames = append(template.DNSNames, h)
- }
- }
-
- derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privk.PublicKey, privk)
- if err != nil {
- return nil, nil, fmt.Errorf("Failed to create certificate: %v", err)
- }
-
- data, err := x509.MarshalECPrivateKey(privk)
- if err != nil {
- return nil, nil, err
- }
-
- cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
- key := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: data})
-
- return cert, key, nil
-}
-
-func ReadCert(fpath string) (*x509.Certificate, error) {
- cf, err := ioutil.ReadFile(fpath)
- if err != nil {
- return nil, err
- }
-
- certBlock, _ := pem.Decode(cf)
- if certBlock == nil {
- return nil, fmt.Errorf("Invalid certificate file")
- }
-
- return x509.ParseCertificate(certBlock.Bytes)
-}
-
-func CertFingerprint(cert *x509.Certificate) string {
- return fmt.Sprintf("%x", sha256.Sum256(cert.Raw))
-}
-
-func CertFingerprintStr(c string) (string, error) {
- pemCertificate, _ := pem.Decode([]byte(c))
- if pemCertificate == nil {
- return "", fmt.Errorf("invalid certificate")
- }
-
- cert, err := x509.ParseCertificate(pemCertificate.Bytes)
- if err != nil {
- return "", err
- }
-
- return CertFingerprint(cert), nil
-}
-
-func GetRemoteCertificate(address string) (*x509.Certificate, error) {
- // Setup a permissive TLS config
- tlsConfig, err := GetTLSConfig("", "", "", nil)
- if err != nil {
- return nil, err
- }
-
- tlsConfig.InsecureSkipVerify = true
-
- // Support disabling of strict ciphers
- if IsTrue(os.Getenv("LXD_INSECURE_TLS")) {
- tlsConfig.CipherSuites = nil
- }
-
- tr := &http.Transport{
- TLSClientConfig: tlsConfig,
- Dial: RFC3493Dialer,
- Proxy: ProxyFromEnvironment,
- }
-
- // Connect
- client := &http.Client{Transport: tr}
- resp, err := client.Get(address)
- if err != nil {
- return nil, err
- }
-
- // Retrieve the certificate
- if resp.TLS == nil || len(resp.TLS.PeerCertificates) == 0 {
- return nil, fmt.Errorf("Unable to read remote TLS certificate")
- }
-
- return resp.TLS.PeerCertificates[0], nil
-}
-
-var testCertPEMBlock = []byte(`-----BEGIN CERTIFICATE-----
-MIIFzjCCA7igAwIBAgIRAKnCQRdpkZ86oXYOd9hGrPgwCwYJKoZIhvcNAQELMB4x
-HDAaBgNVBAoTE2xpbnV4Y29udGFpbmVycy5vcmcwHhcNMTUwNzE1MDQ1NjQ0WhcN
-MjUwNzEyMDQ1NjQ0WjAeMRwwGgYDVQQKExNsaW51eGNvbnRhaW5lcnMub3JnMIIC
-IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAyViJkCzoxa1NYilXqGJog6xz
-lSm4xt8KIzayc0JdB9VxEdIVdJqUzBAUtyCS4KZ9MbPmMEOX9NbBASL0tRK58/7K
-Scq99Kj4XbVMLU1P/y5aW0ymnF0OpKbG6unmgAI2k/duRlbYHvGRdhlswpKl0Yst
-l8i2kXOK0Rxcz90FewcEXGSnIYW21sz8YpBLfIZqOx6XEV36mOdi3MLrhUSAhXDw
-Pay33Y7NonCQUBtiO7BT938cqI14FJrWdKon1UnODtzONcVBLTWtoe7D41+mx7EE
-Taq5OPxBSe0DD6KQcPOZ7ZSJEhIqVKMvzLyiOJpyShmhm4OuGNoAG6jAuSij/9Kc
-aLU4IitcrvFOuAo8M9OpiY9ZCR7Gb/qaPAXPAxE7Ci3f9DDNKXtPXDjhj3YG01+h
-fNXMW3kCkMImn0A/+mZUMdCL87GWN2AN3Do5qaIc5XVEt1gp+LVqJeMoZ/lAeZWT
-IbzcnkneOzE25m+bjw3r3WlR26amhyrWNwjGzRkgfEpw336kniX/GmwaCNgdNk+g
-5aIbVxIHO0DbgkDBtdljR3VOic4djW/LtUIYIQ2egnPPyRR3fcFI+x5EQdVQYUXf
-jpGIwovUDyG0Lkam2tpdeEXvLMZr8+Lhzu+H6vUFSj3cz6gcw/Xepw40FOkYdAI9
-LYB6nwpZLTVaOqZCJ2ECAwEAAaOCAQkwggEFMA4GA1UdDwEB/wQEAwIAoDATBgNV
-HSUEDDAKBggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMIHPBgNVHREEgccwgcSCCVVi
-dW50dVByb4IRMTAuMTY3LjE2MC4xODMvMjSCHzIwMDE6MTVjMDo2NzM1OmVlMDA6
-OmU6ZTMxMy8xMjiCKWZkNTc6Yzg3ZDpmMWVlOmVlMDA6MjFkOjdkZmY6ZmUwOToz
-NzUzLzY0gikyMDAxOjE1YzA6NjczNTplZTAwOjIxZDo3ZGZmOmZlMDk6Mzc1My82
-NIIbZmU4MDo6MjFkOjdkZmY6ZmUwOTozNzUzLzY0ghAxOTIuMTY4LjEyMi4xLzI0
-MAsGCSqGSIb3DQEBCwOCAgEAmcJUSBH7cLw3auEEV1KewtdqY1ARVB/pafAtbe9F
-7ZKBbxUcS7cP3P1hRs5FH1bH44bIJKHxckctNUPqvC+MpXSryKinQ5KvGPNjGdlW
-6EPlQr23btizC6hRdQ6RjEkCnQxhyTLmQ9n78nt47hjA96rFAhCUyfPdv9dI4Zux
-bBTJekhCx5taamQKoxr7tql4Y2TchVlwASZvOfar8I0GxBRFT8w9IjckOSLoT9/s
-OhlvXpeoxxFT7OHwqXEXdRUvw/8MGBo6JDnw+J/NGDBw3Z0goebG4FMT//xGSHia
-czl3A0M0flk4/45L7N6vctwSqi+NxVaJRKeiYPZyzOO9K/d+No+WVBPwKmyP8icQ
-b7FGTelPJOUolC6kmoyM+vyaNUoU4nz6lgOSHAtuqGNDWZWuX/gqzZw77hzDIgkN
-qisOHZWPVlG/iUh1JBkbglBaPeaa3zf0XwSdgwwf4v8Z+YtEiRqkuFgQY70eQKI/
-CIkj1p0iW5IBEsEAGUGklz4ZwqJwH3lQIqDBzIgHe3EP4cXaYsx6oYhPSDdHLPv4
-HMZhl05DP75CEkEWRD0AIaL7SHdyuYUmCZ2zdrMI7TEDrAqcUuPbYpHcdJ2wnYmi
-2G8XHJibfu4PCpIm1J8kPL8rqpdgW3moKR8Mp0HJQOH4tSBr1Ep7xNLP1wg6PIe+
-p7U=
------END CERTIFICATE-----
-`)
-
-var testKeyPEMBlock = []byte(`-----BEGIN RSA PRIVATE KEY-----
-MIIJKAIBAAKCAgEAyViJkCzoxa1NYilXqGJog6xzlSm4xt8KIzayc0JdB9VxEdIV
-dJqUzBAUtyCS4KZ9MbPmMEOX9NbBASL0tRK58/7KScq99Kj4XbVMLU1P/y5aW0ym
-nF0OpKbG6unmgAI2k/duRlbYHvGRdhlswpKl0Ystl8i2kXOK0Rxcz90FewcEXGSn
-IYW21sz8YpBLfIZqOx6XEV36mOdi3MLrhUSAhXDwPay33Y7NonCQUBtiO7BT938c
-qI14FJrWdKon1UnODtzONcVBLTWtoe7D41+mx7EETaq5OPxBSe0DD6KQcPOZ7ZSJ
-EhIqVKMvzLyiOJpyShmhm4OuGNoAG6jAuSij/9KcaLU4IitcrvFOuAo8M9OpiY9Z
-CR7Gb/qaPAXPAxE7Ci3f9DDNKXtPXDjhj3YG01+hfNXMW3kCkMImn0A/+mZUMdCL
-87GWN2AN3Do5qaIc5XVEt1gp+LVqJeMoZ/lAeZWTIbzcnkneOzE25m+bjw3r3WlR
-26amhyrWNwjGzRkgfEpw336kniX/GmwaCNgdNk+g5aIbVxIHO0DbgkDBtdljR3VO
-ic4djW/LtUIYIQ2egnPPyRR3fcFI+x5EQdVQYUXfjpGIwovUDyG0Lkam2tpdeEXv
-LMZr8+Lhzu+H6vUFSj3cz6gcw/Xepw40FOkYdAI9LYB6nwpZLTVaOqZCJ2ECAwEA
-AQKCAgBCe8GwoaOa4kaTCyOurg/kqqTftA8XW751MjJqbJdbZtcXE0+SWRiY6RZu
-AYt+MntUVhrEBQ3AAsloHqq+v5g3QQJ6qz9d8g1Qo/SrYMPxdtTPINhC+VdEdu1n
-1CQQUKrE4QbAoxxp20o0vOB0vweR0WsUm2ntTUGhGsRqvoh4vzBpcbLeFtDwzG7p
-/MtwKtIZA1jOm0GMC5tRWet67cuiRFCPjOCJgAXWhWShjuk43FhdeNN1tIDaDOaT
-Tzwn6V7o+W/9wUxsKTVUKwrzoTno5kKNgrn2XxUP2/sOxpb7NPS2xj0cgnMHz3qR
-GBhYqGbkoOID/88U1acDew1oFktQL24yd8/cvooh7KLN3k5oSKjpKmGAKaMMwsSv
-ccRSM9EkTtgTANLpSFiVF738drZw7UXUsvVTCF8WHhMtGD50XOahR02D1kZnpqpe
-SdxJ9qFNEeozk6w56cTerJNz4od18/gQtNADcPI6WE+8NBrqYjN/X4CBNS76IEtp
-5ddGbi6+4HgO5B0pU87f2bZH4BwR8XJ07wdMRyXXhmnKcnirkyqUtgHmLF3LZnGX
-+Fph5KmhBGs/ZovBvnBI2nREsMfNvzffK7x3hyFXv6J+XxILk4i3LkgKLJFC+RY0
-sjWNQB5tHuA1dbq3AtsbfJcTK764kSaUsq0JoqPQgiSuiNoCIQKCAQEA1Fk4SR5I
-H1QHlXeQ/k1sg6B5H0uosPAnAQxjuI8SvYkty+b4diP+CJIS4IphgLIItROORUFE
-bOi6pj2D2oK04J55fhlJaE8LQs7i90nFXT4B09Ut4oBYGCz5aE/wAUxUanaq1dxj
-K17y+ejlqh7yKTwupHOvIm4ddDwU1U5H9J/Cyywvp5fznVIGMJynVk7zriXYM6aC
-tioNCbOTHwQxjYEaG3AwymXaI6sNwdNiAzgq6M7v43GF3IOj8SYK2VhVdLqLJPnL
-6G5OqMRxxQtxOcSctFOuicu+Jq/KVWJGDaERQZJloHcBJCtO34ONswGJqC/PGoU+
-Ny/BOaZdLQDIpwKCAQEA8rxOKaLuOWEi4MDJuAgQYqpO9JxY0h3yN1YrspBuGezR
-4Lzdh0vUh9Jr4npV723gGwA7r8AcqIPZvSk8MmcYVuwoxz9VWYeNP8P6cRc3bDO8
-shnSvFxV32gKTEH8fOH3/BlJOnbn62tebSFHnGxyh2WPsRbzAMOKj9Q3Yq6ad3DD
-6rJhtopIedC3AWc3aVeO2FHPC+Lza0PhUVsHf5X7Bg+zQlHaaEXB0lysruXkDlU9
-WdW+Ajvo0enhOROgEa7QBC74NsKZF4KJGMGTaglydRtVYbqfx4QbfgDU5h2zaUnB
-lRINZvKNYGRXDN944ymynE9bo4xfOERbWc68GFaItwKCAQBCY+qvIaKW+OSuHIXe
-nEJTHPcBi9wgBdWMBF2hNEo9rAf/eiUweqxP7autPFajsAX85zJSAMft7Q1+MDlr
-NfZrS+DcRfenfx8cMibP/eaQ8nQL0NjZuhrQ5C7OKD/3h+/UoWlkF9WBl9wLun8j
-oy0/KyvCCtE0yIy47Jfu4NyqZNC4SQZVNbLa+uwogrHm0CRrzDU+YM75OUh+QgC7
-b8o2XajV70ux3ApJoI9ajEZWj1cLFrf1umaJvTaijKxTq8R8DF64nsjb0LETHugb
-HSq3TvtXfdpSBrtayRdPfrw8QqFsiOLxOoPG1SuBwlWpI8/wH5J2zjXXdzzIU3VK
-PrZ9AoIBAQDazTjbuT1pxZCN7donJEW42nHPdvttc4b5sJg1HpHQlrNdFIHPyl/q
-iperD8FU0MM5M42Zz99FW4yzQW88s8ex2rCrYgCKcnC1cO/YbygLRduq4zIdjlHt
-zrexo6132K0TtqtWowZNJHx6fIwziWH3gGn1JI2pO5o0KgQ+1MryLVi8v0zrIV1R
-SP0dq6+8Kivd/GhY+5uWLhr1nct1i3k6Ln7Uojnw0ihzegxCn4FiFh32U4AyPVSR
-m3PkYjdgmSZzDu+5VNJw6b6w7RT3eUqOGzRsorASRZgOjatbPpyRpOV1fU9NZAhi
-QjBhrzMl+VlCIxqkowzWCHAb1QmiGqajAoIBAGYKD5h7jTgPFKFlMViTg8LoMcQl
-9vbpmWkB+WdY5xXOwO0hO99rFDmLx6elsmYjdpq8zJkOFTnSB2o3IpenxZltNMsI
-+aDlZWxDxokTxr6gbQPPrjePT1oON0/6sLEYkDOln8H1P9jmLPqTrET0DxCMgE5D
-NE9TAEuUKVhRTWy6FSdP58hUimyVnlbnvbGOh2tviNO+TK/H7k0WjRg57Sz9XTHO
-q36ob5TEsQngkTATEoksE9xhXFxtmTm/nu/26wN2Py49LSwu2aAYTfX/KhQKklNX
-P/tP5//z+hGeba8/xv8YhEr7vhbnlBdwp0wHJj5g7nHAbYfo9ELbXSON8wc=
------END RSA PRIVATE KEY-----
-`)
-
-var testAltCertPEMBlock = []byte(`-----BEGIN CERTIFICATE-----
-MIICEzCCAXygAwIBAgIQMIMChMLGrR+QvmQvpwAU6zANBgkqhkiG9w0BAQsFADAS
-MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw
-MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB
-iQKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9SjY1bIw4
-iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZhIrCHS3l6afab4pZBl2+XsDul
-rKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQABo2gwZjAO
-BgNVHQ8BAf8EBAMCAqQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0TAQH/BAUw
-AwEB/zAuBgNVHREEJzAlggtleGFtcGxlLmNvbYcEfwAAAYcQAAAAAAAAAAAAAAAA
-AAAAATANBgkqhkiG9w0BAQsFAAOBgQCEcetwO59EWk7WiJsG4x8SY+UIAA+flUI9
-tyC4lNhbcF2Idq9greZwbYCqTTTr2XiRNSMLCOjKyI7ukPoPjo16ocHj+P3vZGfs
-h1fIw3cSS2OolhloGw/XM6RWPWtPAlGykKLciQrBru5NAPvCMsb/I1DAceTiotQM
-fblo6RBxUQ==
------END CERTIFICATE-----`)
-
-var testAltKeyPEMBlock = []byte(`-----BEGIN RSA PRIVATE KEY-----
-MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
-SjY1bIw4iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZhIrCHS3l6afab4pZB
-l2+XsDulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQAB
-AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
-3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
-uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
-qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
-jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
-fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
-fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU
-y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj013sovGKUFfYAqVXVlxtIX
-qyUBnu3X9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JEMhNRcVFMO8dJDaFeo
-f9Oeos0UUothgiDktdQHxdNEwLjQf7lJJBzV+5OtwswCWA==
------END RSA PRIVATE KEY-----`)
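
Following the .crt/.key/.ca naming convention documented above, KeyPairAndCA loads, or lazily generates, the pair under a directory prefix. A minimal sketch, assuming the pre-removal shared package; the /var/lib/lxd path is illustrative:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/lxc/lxd/shared"
    )

    func main() {
    	// Loads server.crt/server.key from the directory, generating a fresh
    	// self-signed pair on first run, and picks up server.ca when present.
    	cert, err := shared.KeyPairAndCA("/var/lib/lxd", "server", shared.CertServer)
    	if err != nil {
    		log.Fatal(err)
    	}

    	fmt.Println(cert.Fingerprint()) // SHA-256 over the DER certificate
    	fmt.Printf("%s", cert.PublicKey())
    	if cert.CA() != nil {
    		fmt.Println("PKI mode, CA:", cert.CA().Subject.CommonName)
    	}
    }
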
diff --git a/vendor/github.com/lxc/lxd/shared/cgo.go b/vendor/github.com/lxc/lxd/shared/cgo.go
deleted file mode 100644
index 898f41e94fb7..000000000000
--- a/vendor/github.com/lxc/lxd/shared/cgo.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build linux,cgo
-
-package shared
-
-// #cgo CFLAGS: -std=gnu11 -Wvla -Werror -fvisibility=hidden -Winit-self
-// #cgo CFLAGS: -Wformat=2 -Wshadow -Wendif-labels -fasynchronous-unwind-tables
-// #cgo CFLAGS: -pipe --param=ssp-buffer-size=4 -g -Wunused
-// #cgo CFLAGS: -Werror=implicit-function-declaration
-// #cgo CFLAGS: -Werror=return-type -Wendif-labels -Werror=overflow
-// #cgo CFLAGS: -Wnested-externs -fexceptions
-// #cgo LDFLAGS: -lutil -lpthread
-import "C"
diff --git a/vendor/github.com/lxc/lxd/shared/container.go b/vendor/github.com/lxc/lxd/shared/container.go
deleted file mode 100644
index 869c6c1fc5ae..000000000000
--- a/vendor/github.com/lxc/lxd/shared/container.go
+++ /dev/null
@@ -1,425 +0,0 @@
-package shared
-
-import (
- "fmt"
- "regexp"
- "strconv"
- "strings"
- "time"
-
- "github.com/pkg/errors"
- "gopkg.in/robfig/cron.v2"
-
- "github.com/lxc/lxd/shared/units"
-)
-
-type ContainerAction string
-
-const (
- Stop ContainerAction = "stop"
- Start ContainerAction = "start"
- Restart ContainerAction = "restart"
- Freeze ContainerAction = "freeze"
- Unfreeze ContainerAction = "unfreeze"
-)
-
-func IsInt64(value string) error {
- if value == "" {
- return nil
- }
-
- _, err := strconv.ParseInt(value, 10, 64)
- if err != nil {
- return fmt.Errorf("Invalid value for an integer: %s", value)
- }
-
- return nil
-}
-
-func IsUint8(value string) error {
- if value == "" {
- return nil
- }
-
- _, err := strconv.ParseUint(value, 10, 8)
- if err != nil {
- return fmt.Errorf("Invalid value for an integer: %s. Must be between 0 and 255", value)
- }
-
- return nil
-}
-
-func IsUint32(value string) error {
- if value == "" {
- return nil
- }
-
- _, err := strconv.ParseUint(value, 10, 32)
- if err != nil {
- return fmt.Errorf("Invalid value for uint32: %s: %v", value, err)
- }
-
- return nil
-}
-
-func IsPriority(value string) error {
- if value == "" {
- return nil
- }
-
- valueInt, err := strconv.ParseInt(value, 10, 64)
- if err != nil {
- return fmt.Errorf("Invalid value for an integer: %s", value)
- }
-
- if valueInt < 0 || valueInt > 10 {
- return fmt.Errorf("Invalid value for a limit '%s'. Must be between 0 and 10", value)
- }
-
- return nil
-}
-
-func IsBool(value string) error {
- if value == "" {
- return nil
- }
-
- if !StringInSlice(strings.ToLower(value), []string{"true", "false", "yes", "no", "1", "0", "on", "off"}) {
- return fmt.Errorf("Invalid value for a boolean: %s", value)
- }
-
- return nil
-}
-
-func IsOneOf(value string, valid []string) error {
- if value == "" {
- return nil
- }
-
- if !StringInSlice(value, valid) {
- return fmt.Errorf("Invalid value: %s (not one of %s)", value, valid)
- }
-
- return nil
-}
-
-func IsAny(value string) error {
- return nil
-}
-
-func IsNotEmpty(value string) error {
- if value == "" {
- return fmt.Errorf("Required value")
- }
-
- return nil
-}
-
-// IsDeviceID validates that the string is four lowercase hex characters suitable as a Vendor or Device ID.
-func IsDeviceID(value string) error {
- if value == "" {
- return nil
- }
-
- regexHexLc, err := regexp.Compile("^[0-9a-f]+$")
- if err != nil {
- return err
- }
-
- if len(value) != 4 || !regexHexLc.MatchString(value) {
- return fmt.Errorf("Invalid value, must be four lower case hex characters")
- }
-
- return nil
-}
-
-// IsRootDiskDevice returns true if the given device representation is configured as root disk for
-// a container. It typically gets passed a specific entry of api.Container.Devices.
-func IsRootDiskDevice(device map[string]string) bool {
- // Root disk devices also need a non-empty "pool" property, but we can't check that here
- // because this function is used with clients talking to older servers where there was no
- // concept of a storage pool, and also it is used for migrating from old to new servers.
- // The validation of the non-empty "pool" property is done inside the disk device itself.
- if device["type"] == "disk" && device["path"] == "/" && device["source"] == "" {
- return true
- }
-
- return false
-}
-
-// GetRootDiskDevice returns the container device that is configured as root disk
-func GetRootDiskDevice(devices map[string]map[string]string) (string, map[string]string, error) {
- var devName string
- var dev map[string]string
-
- for n, d := range devices {
- if IsRootDiskDevice(d) {
- if devName != "" {
- return "", nil, fmt.Errorf("More than one root device found")
- }
-
- devName = n
- dev = d
- }
- }
-
- if devName != "" {
- return devName, dev, nil
- }
-
- return "", nil, fmt.Errorf("No root device could be found")
-}
-
-// KnownContainerConfigKeys maps all fully defined, well-known config keys
-// to an appropriate checker function, which validates whether or not a
-// given value is syntactically legal.
-var KnownContainerConfigKeys = map[string]func(value string) error{
- "boot.autostart": IsBool,
- "boot.autostart.delay": IsInt64,
- "boot.autostart.priority": IsInt64,
- "boot.stop.priority": IsInt64,
- "boot.host_shutdown_timeout": IsInt64,
-
- "limits.cpu": func(value string) error {
- if value == "" {
- return nil
- }
-
- // Validate the character set
- match, _ := regexp.MatchString("^[-,0-9]*$", value)
- if !match {
- return fmt.Errorf("Invalid CPU limit syntax")
- }
-
- // Validate first character
- if strings.HasPrefix(value, "-") || strings.HasPrefix(value, ",") {
- return fmt.Errorf("CPU limit can't start with a separator")
- }
-
- // Validate last character
- if strings.HasSuffix(value, "-") || strings.HasSuffix(value, ",") {
- return fmt.Errorf("CPU limit can't end with a separator")
- }
-
- return nil
- },
- "limits.cpu.allowance": func(value string) error {
- if value == "" {
- return nil
- }
-
- if strings.HasSuffix(value, "%") {
- // Percentage based allocation
- _, err := strconv.Atoi(strings.TrimSuffix(value, "%"))
- if err != nil {
- return err
- }
-
- return nil
- }
-
- // Time based allocation
- fields := strings.SplitN(value, "/", 2)
- if len(fields) != 2 {
- return fmt.Errorf("Invalid allowance: %s", value)
- }
-
- _, err := strconv.Atoi(strings.TrimSuffix(fields[0], "ms"))
- if err != nil {
- return err
- }
-
- _, err = strconv.Atoi(strings.TrimSuffix(fields[1], "ms"))
- if err != nil {
- return err
- }
-
- return nil
- },
- "limits.cpu.priority": IsPriority,
-
- "limits.disk.priority": IsPriority,
-
- "limits.memory": func(value string) error {
- if value == "" {
- return nil
- }
-
- if strings.HasSuffix(value, "%") {
- _, err := strconv.ParseInt(strings.TrimSuffix(value, "%"), 10, 64)
- if err != nil {
- return err
- }
-
- return nil
- }
-
- _, err := units.ParseByteSizeString(value)
- if err != nil {
- return err
- }
-
- return nil
- },
- "limits.memory.enforce": func(value string) error {
- return IsOneOf(value, []string{"soft", "hard"})
- },
- "limits.memory.swap": IsBool,
- "limits.memory.swap.priority": IsPriority,
-
- "limits.network.priority": IsPriority,
-
- "limits.processes": IsInt64,
-
- "linux.kernel_modules": IsAny,
-
- "migration.incremental.memory": IsBool,
- "migration.incremental.memory.iterations": IsUint32,
- "migration.incremental.memory.goal": IsUint32,
-
- "nvidia.runtime": IsBool,
- "nvidia.driver.capabilities": IsAny,
- "nvidia.require.cuda": IsAny,
- "nvidia.require.driver": IsAny,
-
- "security.nesting": IsBool,
- "security.privileged": IsBool,
- "security.devlxd": IsBool,
- "security.devlxd.images": IsBool,
-
- "security.protection.delete": IsBool,
- "security.protection.shift": IsBool,
-
- "security.idmap.base": IsUint32,
- "security.idmap.isolated": IsBool,
- "security.idmap.size": IsUint32,
-
- "security.syscalls.blacklist_default": IsBool,
- "security.syscalls.blacklist_compat": IsBool,
- "security.syscalls.blacklist": IsAny,
- "security.syscalls.intercept.mknod": IsBool,
- "security.syscalls.intercept.mount": IsBool,
- "security.syscalls.intercept.mount.allowed": IsAny,
- "security.syscalls.intercept.mount.shift": IsBool,
- "security.syscalls.intercept.setxattr": IsBool,
- "security.syscalls.whitelist": IsAny,
-
- "snapshots.schedule": func(value string) error {
- if value == "" {
- return nil
- }
-
- if len(strings.Split(value, " ")) != 5 {
-			return fmt.Errorf("Schedule must be of the form: <minute> <hour> <day-of-month> <month> <day-of-week>")
- }
-
- _, err := cron.Parse(fmt.Sprintf("* %s", value))
- if err != nil {
- return errors.Wrap(err, "Error parsing schedule")
- }
-
- return nil
- },
- "snapshots.schedule.stopped": IsBool,
- "snapshots.pattern": IsAny,
- "snapshots.expiry": func(value string) error {
- // Validate expression
- _, err := GetSnapshotExpiry(time.Time{}, value)
- return err
- },
-
- // Caller is responsible for full validation of any raw.* value
- "raw.apparmor": IsAny,
- "raw.lxc": IsAny,
- "raw.seccomp": IsAny,
- "raw.idmap": IsAny,
-
- "volatile.apply_template": IsAny,
- "volatile.base_image": IsAny,
- "volatile.last_state.idmap": IsAny,
- "volatile.last_state.power": IsAny,
- "volatile.idmap.base": IsAny,
- "volatile.idmap.current": IsAny,
- "volatile.idmap.next": IsAny,
- "volatile.apply_quota": IsAny,
-}
-
-// ConfigKeyChecker returns a function that will check whether or not
-// a provided value is valid for the associated config key. Returns an
-// error if the key is not known. The checker function only performs
-// syntactic checking of the value, semantic and usage checking must
-// be done by the caller. User defined keys are always considered to
-// be valid, e.g. user.* and environment.* keys.
-func ConfigKeyChecker(key string) (func(value string) error, error) {
- if f, ok := KnownContainerConfigKeys[key]; ok {
- return f, nil
- }
-
- if strings.HasPrefix(key, "volatile.") {
- if strings.HasSuffix(key, ".hwaddr") {
- return IsAny, nil
- }
-
- if strings.HasSuffix(key, ".name") {
- return IsAny, nil
- }
-
- if strings.HasSuffix(key, ".host_name") {
- return IsAny, nil
- }
-
- if strings.HasSuffix(key, ".mtu") {
- return IsAny, nil
- }
-
- if strings.HasSuffix(key, ".created") {
- return IsAny, nil
- }
-
- if strings.HasSuffix(key, ".id") {
- return IsAny, nil
- }
-
- if strings.HasSuffix(key, ".vlan") {
- return IsAny, nil
- }
-
- if strings.HasSuffix(key, ".spoofcheck") {
- return IsAny, nil
- }
-
- if strings.HasSuffix(key, ".apply_quota") {
- return IsAny, nil
- }
- }
-
- if strings.HasPrefix(key, "environment.") {
- return IsAny, nil
- }
-
- if strings.HasPrefix(key, "user.") {
- return IsAny, nil
- }
-
- if strings.HasPrefix(key, "image.") {
- return IsAny, nil
- }
-
- if strings.HasPrefix(key, "limits.kernel.") &&
- (len(key) > len("limits.kernel.")) {
- return IsAny, nil
- }
-
- return nil, fmt.Errorf("Unknown configuration key: %s", key)
-}
-
-// ContainerGetParentAndSnapshotName returns the parent container name, snapshot
-// name, and whether it actually was a snapshot name.
-func ContainerGetParentAndSnapshotName(name string) (string, string, bool) {
- fields := strings.SplitN(name, SnapshotDelimiter, 2)
- if len(fields) == 1 {
- return name, "", false
- }
-
- return fields[0], fields[1], true
-}
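
ConfigKeyChecker resolves a key to its syntactic validator: known keys go through KnownContainerConfigKeys, whole namespaces such as user.* always pass, and anything else is rejected. A small sketch, assuming the pre-removal shared package:

    package main

    import (
    	"fmt"

    	"github.com/lxc/lxd/shared"
    )

    func main() {
    	for _, kv := range [][2]string{
    		{"limits.memory", "2GB"},     // known key, checked against byte-size syntax
    		{"limits.cpu", "0-3,8"},      // known key, range/list syntax
    		{"user.comment", "anything"}, // user.* keys always pass
    		{"bogus.key", "x"},           // unknown key -> error from ConfigKeyChecker
    	} {
    		checker, err := shared.ConfigKeyChecker(kv[0])
    		if err != nil {
    			fmt.Println(kv[0], "->", err)
    			continue
    		}
    		fmt.Println(kv[0], "valid:", checker(kv[1]) == nil)
    	}
    }
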
diff --git a/vendor/github.com/lxc/lxd/shared/eagain/file_unix.go b/vendor/github.com/lxc/lxd/shared/eagain/file_unix.go
deleted file mode 100644
index bd671df90658..000000000000
--- a/vendor/github.com/lxc/lxd/shared/eagain/file_unix.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package eagain
-
-import (
- "io"
-
- "golang.org/x/sys/unix"
-
- "github.com/lxc/lxd/shared"
-)
-
-// Reader represents an io.Reader that handles EAGAIN
-type Reader struct {
- Reader io.Reader
-}
-
-// Read behaves like io.Reader.Read but will retry on EAGAIN
-func (er Reader) Read(p []byte) (int, error) {
-again:
- n, err := er.Reader.Read(p)
- if err == nil {
- return n, nil
- }
-
- // keep retrying on EAGAIN
- errno, ok := shared.GetErrno(err)
- if ok && (errno == unix.EAGAIN || errno == unix.EINTR) {
- goto again
- }
-
- return n, err
-}
-
-// Writer represents an io.Writer that handles EAGAIN
-type Writer struct {
- Writer io.Writer
-}
-
-// Write behaves like io.Writer.Write but will retry on EAGAIN
-func (ew Writer) Write(p []byte) (int, error) {
-again:
- n, err := ew.Writer.Write(p)
- if err == nil {
- return n, nil
- }
-
- // keep retrying on EAGAIN
- errno, ok := shared.GetErrno(err)
- if ok && (errno == unix.EAGAIN || errno == unix.EINTR) {
- goto again
- }
-
- return n, err
-}
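
Both wrappers simply retry on EAGAIN/EINTR, so callers composing them with io.Copy never see spurious errors from a non-blocking descriptor. A drop-in sketch under the same pre-removal import path:

    package main

    import (
    	"io"
    	"os"

    	"github.com/lxc/lxd/shared/eagain"
    )

    func main() {
    	// The wrappers retry transparently, so the copy loop only ever sees
    	// real errors or EOF.
    	src := eagain.Reader{Reader: os.Stdin}
    	dst := eagain.Writer{Writer: os.Stdout}
    	if _, err := io.Copy(dst, src); err != nil {
    		panic(err)
    	}
    }
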
diff --git a/vendor/github.com/lxc/lxd/shared/ioprogress/data.go b/vendor/github.com/lxc/lxd/shared/ioprogress/data.go
deleted file mode 100644
index 59a790588989..000000000000
--- a/vendor/github.com/lxc/lxd/shared/ioprogress/data.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package ioprogress
-
-// The ProgressData struct represents new progress information on an operation
-type ProgressData struct {
-	// Preferred string representation of progress (always set)
- Text string
-
- // Progress in percent
- Percentage int
-
- // Number of bytes transferred (for files)
- TransferredBytes int64
-
- // Total number of bytes (for files)
- TotalBytes int64
-}
diff --git a/vendor/github.com/lxc/lxd/shared/ioprogress/reader.go b/vendor/github.com/lxc/lxd/shared/ioprogress/reader.go
deleted file mode 100644
index 299cb6b29f14..000000000000
--- a/vendor/github.com/lxc/lxd/shared/ioprogress/reader.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package ioprogress
-
-import (
- "io"
-)
-
-// ProgressReader is a wrapper around ReadCloser which allows for progress tracking
-type ProgressReader struct {
- io.ReadCloser
- Tracker *ProgressTracker
-}
-
-// Read behaves like io.Reader.Read but also updates the attached ProgressTracker
-func (pt *ProgressReader) Read(p []byte) (int, error) {
- // Do normal reader tasks
- n, err := pt.ReadCloser.Read(p)
-
- // Do the actual progress tracking
- if pt.Tracker != nil {
- pt.Tracker.total += int64(n)
- pt.Tracker.update(n)
- }
-
- return n, err
-}
diff --git a/vendor/github.com/lxc/lxd/shared/ioprogress/tracker.go b/vendor/github.com/lxc/lxd/shared/ioprogress/tracker.go
deleted file mode 100644
index 494f4b66c4d5..000000000000
--- a/vendor/github.com/lxc/lxd/shared/ioprogress/tracker.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package ioprogress
-
-import (
- "time"
-)
-
-// ProgressTracker provides the stream information needed for tracking
-type ProgressTracker struct {
- Length int64
- Handler func(int64, int64)
-
- percentage float64
- total int64
- start *time.Time
- last *time.Time
-}
-
-func (pt *ProgressTracker) update(n int) {
- // Skip the rest if no handler attached
- if pt.Handler == nil {
- return
- }
-
- // Initialize start time if needed
- if pt.start == nil {
- cur := time.Now()
- pt.start = &cur
- pt.last = pt.start
- }
-
- // Skip if no data to count
- if n <= 0 {
- return
- }
-
- // Update interval handling
- var percentage float64
- if pt.Length > 0 {
- // If running in relative mode, check that we increased by at least 1%
- percentage = float64(pt.total) / float64(pt.Length) * float64(100)
- if percentage-pt.percentage < 0.9 {
- return
- }
- } else {
- // If running in absolute mode, check that at least a second elapsed
- interval := time.Since(*pt.last).Seconds()
- if interval < 1 {
- return
- }
- }
-
- // Determine speed
- speedInt := int64(0)
- duration := time.Since(*pt.start).Seconds()
- if duration > 0 {
- speed := float64(pt.total) / duration
- speedInt = int64(speed)
- }
-
- // Determine progress
- var progressInt int64
- if pt.Length > 0 {
- pt.percentage = percentage
- progressInt = int64(1 - (int(percentage) % 1) + int(percentage))
- if progressInt > 100 {
- progressInt = 100
- }
- } else {
- progressInt = pt.total
-
- // Update timestamp
- cur := time.Now()
- pt.last = &cur
- }
-
- pt.Handler(progressInt, speedInt)
-}
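
The tracker runs in one of two modes: with Length set it reports percentages in roughly 1% steps, otherwise absolute byte counts at most once per second; either way the handler receives (progress, bytes-per-second). A usage sketch combining it with ProgressReader, assuming the pre-removal ioprogress package and a local image.tar.gz:

    package main

    import (
    	"fmt"
    	"io"
    	"io/ioutil"
    	"os"

    	"github.com/lxc/lxd/shared/ioprogress"
    )

    func main() {
    	f, err := os.Open("image.tar.gz")
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()

    	info, err := f.Stat()
    	if err != nil {
    		panic(err)
    	}

    	reader := &ioprogress.ProgressReader{
    		ReadCloser: f,
    		Tracker: &ioprogress.ProgressTracker{
    			// With Length set, the handler fires in ~1% increments with
    			// (percent, bytes/s); unset, it fires at most once a second
    			// with (bytes, bytes/s).
    			Length: info.Size(),
    			Handler: func(progress, speed int64) {
    				fmt.Printf("\r%d%% (%d B/s)", progress, speed)
    			},
    		},
    	}

    	if _, err := io.Copy(ioutil.Discard, reader); err != nil {
    		panic(err)
    	}
    }
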
diff --git a/vendor/github.com/lxc/lxd/shared/ioprogress/writer.go b/vendor/github.com/lxc/lxd/shared/ioprogress/writer.go
deleted file mode 100644
index f45b45e8b921..000000000000
--- a/vendor/github.com/lxc/lxd/shared/ioprogress/writer.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package ioprogress
-
-import (
- "io"
-)
-
-// ProgressWriter is a wrapper around WriteCloser which allows for progress tracking
-type ProgressWriter struct {
- io.WriteCloser
- Tracker *ProgressTracker
-}
-
-// Write behaves like io.Writer.Write but also updates the attached ProgressTracker
-func (pt *ProgressWriter) Write(p []byte) (int, error) {
- // Do normal writer tasks
- n, err := pt.WriteCloser.Write(p)
-
- // Do the actual progress tracking
- if pt.Tracker != nil {
- pt.Tracker.total += int64(n)
- pt.Tracker.update(n)
- }
-
- return n, err
-}
diff --git a/vendor/github.com/lxc/lxd/shared/json.go b/vendor/github.com/lxc/lxd/shared/json.go
deleted file mode 100644
index 09f106665395..000000000000
--- a/vendor/github.com/lxc/lxd/shared/json.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package shared
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
-
- "github.com/lxc/lxd/shared/logger"
-)
-
-type Jmap map[string]interface{}
-
-func (m Jmap) GetString(key string) (string, error) {
- if val, ok := m[key]; !ok {
- return "", fmt.Errorf("Response was missing `%s`", key)
- } else if val, ok := val.(string); !ok {
- return "", fmt.Errorf("`%s` was not a string", key)
- } else {
- return val, nil
- }
-}
-
-func (m Jmap) GetMap(key string) (Jmap, error) {
- if val, ok := m[key]; !ok {
- return nil, fmt.Errorf("Response was missing `%s`", key)
- } else if val, ok := val.(map[string]interface{}); !ok {
- return nil, fmt.Errorf("`%s` was not a map, got %T", key, m[key])
- } else {
- return val, nil
- }
-}
-
-func (m Jmap) GetInt(key string) (int, error) {
- if val, ok := m[key]; !ok {
- return -1, fmt.Errorf("Response was missing `%s`", key)
- } else if val, ok := val.(float64); !ok {
- return -1, fmt.Errorf("`%s` was not an int", key)
- } else {
- return int(val), nil
- }
-}
-
-func (m Jmap) GetBool(key string) (bool, error) {
- if val, ok := m[key]; !ok {
- return false, fmt.Errorf("Response was missing `%s`", key)
- } else if val, ok := val.(bool); !ok {
-		return false, fmt.Errorf("`%s` was not a bool", key)
- } else {
- return val, nil
- }
-}
-
-func DebugJson(r *bytes.Buffer) {
- pretty := &bytes.Buffer{}
- if err := json.Indent(pretty, r.Bytes(), "\t", "\t"); err != nil {
- logger.Debugf("error indenting json: %s", err)
- return
- }
-
- // Print the JSON without the last "\n"
- str := pretty.String()
- logger.Debugf("\n\t%s", str[0:len(str)-1])
-}
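
Jmap's accessors encode one encoding/json quirk: numbers decode into interface{} as float64, which is why GetInt asserts float64 and converts. A short sketch, pre-removal import path:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/lxc/lxd/shared"
    )

    func main() {
    	var m shared.Jmap
    	if err := json.Unmarshal([]byte(`{"name": "c1", "pid": 1234, "ephemeral": false}`), &m); err != nil {
    		panic(err)
    	}

    	name, _ := m.GetString("name")
    	pid, _ := m.GetInt("pid") // JSON numbers decode as float64; GetInt converts
    	eph, _ := m.GetBool("ephemeral")
    	fmt.Println(name, pid, eph)

    	if _, err := m.GetString("missing"); err != nil {
    		fmt.Println(err) // Response was missing `missing`
    	}
    }
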
diff --git a/vendor/github.com/lxc/lxd/shared/logger/format.go b/vendor/github.com/lxc/lxd/shared/logger/format.go
deleted file mode 100644
index dcd11d3d197c..000000000000
--- a/vendor/github.com/lxc/lxd/shared/logger/format.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package logger
-
-import (
- "encoding/json"
- "fmt"
- "runtime"
-)
-
-// Pretty will attempt to convert any Go structure into a string suitable for logging
-func Pretty(input interface{}) string {
- pretty, err := json.MarshalIndent(input, "\t", "\t")
- if err != nil {
- return fmt.Sprintf("%v", input)
- }
-
- return fmt.Sprintf("\n\t%s", pretty)
-}
-
-// GetStack will convert the Go stack into a string suitable for logging
-func GetStack() string {
- buf := make([]byte, 1<<16)
- n := runtime.Stack(buf, true)
-
- return fmt.Sprintf("\n\t%s", buf[:n])
-}
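Pretty relies on a simple fallback: try json.MarshalIndent, and if the value cannot be marshaled (channels, functions, cycles), fall back to fmt's %v. A minimal equivalent sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// pretty marshals input as indented JSON, falling back to %v on failure,
// as the removed logger.Pretty does.
func pretty(input interface{}) string {
	b, err := json.MarshalIndent(input, "\t", "\t")
	if err != nil {
		return fmt.Sprintf("%v", input)
	}
	return fmt.Sprintf("\n\t%s", b)
}

func main() {
	fmt.Println(pretty(map[string]int{"a": 1}))
	fmt.Println(pretty(make(chan int))) // not marshalable: falls back to %v
}
```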
diff --git a/vendor/github.com/lxc/lxd/shared/logger/log.go b/vendor/github.com/lxc/lxd/shared/logger/log.go
deleted file mode 100644
index a031d8c14cc7..000000000000
--- a/vendor/github.com/lxc/lxd/shared/logger/log.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// +build !logdebug
-
-package logger
-
-import (
- "fmt"
-)
-
-// Logger is the main logging interface
-type Logger interface {
- Debug(msg string, ctx ...interface{})
- Info(msg string, ctx ...interface{})
- Warn(msg string, ctx ...interface{})
- Error(msg string, ctx ...interface{})
- Crit(msg string, ctx ...interface{})
-}
-
-// Log contains the logger used by all the logging functions
-var Log Logger
-
-type nullLogger struct{}
-
-func (nl nullLogger) Debug(msg string, ctx ...interface{}) {}
-func (nl nullLogger) Info(msg string, ctx ...interface{}) {}
-func (nl nullLogger) Warn(msg string, ctx ...interface{}) {}
-func (nl nullLogger) Error(msg string, ctx ...interface{}) {}
-func (nl nullLogger) Crit(msg string, ctx ...interface{}) {}
-
-func init() {
- Log = nullLogger{}
-}
-
-// Debug logs a message (with optional context) at the DEBUG log level
-func Debug(msg string, ctx ...interface{}) {
- if Log != nil {
- Log.Debug(msg, ctx...)
- }
-}
-
-// Info logs a message (with optional context) at the INFO log level
-func Info(msg string, ctx ...interface{}) {
- if Log != nil {
- Log.Info(msg, ctx...)
- }
-}
-
-// Warn logs a message (with optional context) at the WARNING log level
-func Warn(msg string, ctx ...interface{}) {
- if Log != nil {
- Log.Warn(msg, ctx...)
- }
-}
-
-// Error logs a message (with optional context) at the ERROR log level
-func Error(msg string, ctx ...interface{}) {
- if Log != nil {
- Log.Error(msg, ctx...)
- }
-}
-
-// Crit logs a message (with optional context) at the CRITICAL log level
-func Crit(msg string, ctx ...interface{}) {
- if Log != nil {
- Log.Crit(msg, ctx...)
- }
-}
-
-// Infof logs at the INFO log level using a standard printf format string
-func Infof(format string, args ...interface{}) {
- if Log != nil {
- Log.Info(fmt.Sprintf(format, args...))
- }
-}
-
-// Debugf logs at the DEBUG log level using a standard printf format string
-func Debugf(format string, args ...interface{}) {
- if Log != nil {
- Log.Debug(fmt.Sprintf(format, args...))
- }
-}
-
-// Warnf logs at the WARNING log level using a standard printf format string
-func Warnf(format string, args ...interface{}) {
- if Log != nil {
- Log.Warn(fmt.Sprintf(format, args...))
- }
-}
-
-// Errorf logs at the ERROR log level using a standard printf format string
-func Errorf(format string, args ...interface{}) {
- if Log != nil {
- Log.Error(fmt.Sprintf(format, args...))
- }
-}
-
-// Critf logs at the CRITICAL log level using a standard printf format string
-func Critf(format string, args ...interface{}) {
- if Log != nil {
- Log.Crit(fmt.Sprintf(format, args...))
- }
-}
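Callers plugged a backend in by assigning to logger.Log; any type with these five methods satisfies the interface. A sketch wiring it to the standard library log package (stdLogger is a hypothetical name):

```go
package main

import "log"

// Logger matches the five-method interface removed above.
type Logger interface {
	Debug(msg string, ctx ...interface{})
	Info(msg string, ctx ...interface{})
	Warn(msg string, ctx ...interface{})
	Error(msg string, ctx ...interface{})
	Crit(msg string, ctx ...interface{})
}

// stdLogger is a hypothetical implementation backed by the standard library.
type stdLogger struct{}

func (stdLogger) Debug(msg string, ctx ...interface{}) { log.Println("DBUG", msg, ctx) }
func (stdLogger) Info(msg string, ctx ...interface{})  { log.Println("INFO", msg, ctx) }
func (stdLogger) Warn(msg string, ctx ...interface{})  { log.Println("WARN", msg, ctx) }
func (stdLogger) Error(msg string, ctx ...interface{}) { log.Println("EROR", msg, ctx) }
func (stdLogger) Crit(msg string, ctx ...interface{})  { log.Println("CRIT", msg, ctx) }

func main() {
	var l Logger = stdLogger{}
	l.Info("starting", "port", 6443)
}
```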
diff --git a/vendor/github.com/lxc/lxd/shared/logger/log_debug.go b/vendor/github.com/lxc/lxd/shared/logger/log_debug.go
deleted file mode 100644
index 49185537b38c..000000000000
--- a/vendor/github.com/lxc/lxd/shared/logger/log_debug.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// +build logdebug
-
-package logger
-
-import (
- "fmt"
- "runtime"
-)
-
-type Logger interface {
- Debug(msg string, ctx ...interface{})
- Info(msg string, ctx ...interface{})
- Warn(msg string, ctx ...interface{})
- Error(msg string, ctx ...interface{})
- Crit(msg string, ctx ...interface{})
-}
-
-var Log Logger
-
-type nullLogger struct{}
-
-func (nl nullLogger) Debug(msg string, ctx ...interface{}) {}
-func (nl nullLogger) Info(msg string, ctx ...interface{}) {}
-func (nl nullLogger) Warn(msg string, ctx ...interface{}) {}
-func (nl nullLogger) Error(msg string, ctx ...interface{}) {}
-func (nl nullLogger) Crit(msg string, ctx ...interface{}) {}
-
-func init() {
- Log = nullLogger{}
-}
-
-// General wrappers around Logger interface functions.
-func Debug(msg string, ctx ...interface{}) {
- if Log != nil {
- pc, fn, line, _ := runtime.Caller(1)
- msg := fmt.Sprintf("%s: %d: %s: %s", fn, line, runtime.FuncForPC(pc).Name(), msg)
- Log.Debug(msg, ctx...)
- }
-}
-
-func Info(msg string, ctx ...interface{}) {
- if Log != nil {
- pc, fn, line, _ := runtime.Caller(1)
- msg := fmt.Sprintf("%s: %d: %s: %s", fn, line, runtime.FuncForPC(pc).Name(), msg)
- Log.Info(msg, ctx...)
- }
-}
-
-func Warn(msg string, ctx ...interface{}) {
- if Log != nil {
- pc, fn, line, _ := runtime.Caller(1)
- msg := fmt.Sprintf("%s: %d: %s: %s", fn, line, runtime.FuncForPC(pc).Name(), msg)
- Log.Warn(msg, ctx...)
- }
-}
-
-func Error(msg string, ctx ...interface{}) {
- if Log != nil {
- pc, fn, line, _ := runtime.Caller(1)
- msg := fmt.Sprintf("%s: %d: %s: %s", fn, line, runtime.FuncForPC(pc).Name(), msg)
- Log.Error(msg, ctx...)
- }
-}
-
-func Crit(msg string, ctx ...interface{}) {
- if Log != nil {
- pc, fn, line, _ := runtime.Caller(1)
- msg := fmt.Sprintf("%s: %d: %s: %s", fn, line, runtime.FuncForPC(pc).Name(), msg)
- Log.Crit(msg, ctx...)
- }
-}
-
-// Wrappers around Logger interface functions that send a string to the Logger
-// by running it through fmt.Sprintf().
-func Infof(format string, args ...interface{}) {
- if Log != nil {
- msg := fmt.Sprintf(format, args...)
- pc, fn, line, _ := runtime.Caller(1)
- msg = fmt.Sprintf("%s: %d: %s: %s", fn, line, runtime.FuncForPC(pc).Name(), msg)
- Log.Info(msg)
- }
-}
-
-func Debugf(format string, args ...interface{}) {
- if Log != nil {
- msg := fmt.Sprintf(format, args...)
- pc, fn, line, _ := runtime.Caller(1)
- msg = fmt.Sprintf("%s: %d: %s: %s", fn, line, runtime.FuncForPC(pc).Name(), msg)
- Log.Debug(msg)
- }
-}
-
-func Warnf(format string, args ...interface{}) {
- if Log != nil {
- msg := fmt.Sprintf(format, args...)
- pc, fn, line, _ := runtime.Caller(1)
- msg = fmt.Sprintf("%s: %d: %s: %s", fn, line, runtime.FuncForPC(pc).Name(), msg)
- Log.Warn(msg)
- }
-}
-
-func Errorf(format string, args ...interface{}) {
- if Log != nil {
- msg := fmt.Sprintf(format, args...)
- pc, fn, line, _ := runtime.Caller(1)
- msg = fmt.Sprintf("%s: %d: %s: %s", fn, line, runtime.FuncForPC(pc).Name(), msg)
- Log.Error(msg)
- }
-}
-
-func Critf(format string, args ...interface{}) {
- if Log != nil {
- msg := fmt.Sprintf(format, args...)
- pc, fn, line, _ := runtime.Caller(1)
- msg = fmt.Sprintf("%s: %d: %s: %s", fn, line, runtime.FuncForPC(pc).Name(), msg)
- Log.Crit(msg)
- }
-}
-
-func PrintStack() {
- buf := make([]byte, 1<<16)
- runtime.Stack(buf, true)
- Errorf("%s", buf)
-}
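The logdebug build differs from the plain build only by prefixing each message with the caller's file, line, and function via runtime.Caller. That decoration step in isolation:

```go
package main

import (
	"fmt"
	"runtime"
)

// decorate prefixes msg with the caller's file, line and function name,
// as the logdebug variants above do.
func decorate(msg string) string {
	pc, fn, line, ok := runtime.Caller(1)
	if !ok {
		return msg
	}
	return fmt.Sprintf("%s: %d: %s: %s", fn, line, runtime.FuncForPC(pc).Name(), msg)
}

func main() {
	fmt.Println(decorate("hello"))
	// e.g. /tmp/main.go: 21: main.main: hello
}
```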
diff --git a/vendor/github.com/lxc/lxd/shared/network.go b/vendor/github.com/lxc/lxd/shared/network.go
deleted file mode 100644
index beb927e66ca6..000000000000
--- a/vendor/github.com/lxc/lxd/shared/network.go
+++ /dev/null
@@ -1,568 +0,0 @@
-package shared
-
-import (
- "crypto/tls"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/gorilla/websocket"
-
- "github.com/lxc/lxd/shared/api"
- "github.com/lxc/lxd/shared/logger"
-)
-
-func RFC3493Dialer(network, address string) (net.Conn, error) {
- host, port, err := net.SplitHostPort(address)
- if err != nil {
- return nil, err
- }
-
- addrs, err := net.LookupHost(host)
- if err != nil {
- return nil, err
- }
- for _, a := range addrs {
- c, err := net.DialTimeout(network, net.JoinHostPort(a, port), 10*time.Second)
- if err != nil {
- continue
- }
- if tc, ok := c.(*net.TCPConn); ok {
- tc.SetKeepAlive(true)
- tc.SetKeepAlivePeriod(3 * time.Second)
- }
- return c, err
- }
-	return nil, fmt.Errorf("Unable to connect to: %s", address)
-}
-
-// InitTLSConfig returns a tls.Config populated with default encryption
-// parameters. This is used as baseline config for both client and server
-// certificates used by LXD.
-func InitTLSConfig() *tls.Config {
- return &tls.Config{
- MinVersion: tls.VersionTLS12,
- CipherSuites: []uint16{
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- },
- PreferServerCipherSuites: true,
- }
-}
-
-func finalizeTLSConfig(tlsConfig *tls.Config, tlsRemoteCert *x509.Certificate) {
- // Setup RootCA
- if tlsConfig.RootCAs == nil {
- tlsConfig.RootCAs, _ = systemCertPool()
- }
-
- // Trusted certificates
- if tlsRemoteCert != nil {
- if tlsConfig.RootCAs == nil {
- tlsConfig.RootCAs = x509.NewCertPool()
- }
-
- // Make it a valid RootCA
- tlsRemoteCert.IsCA = true
- tlsRemoteCert.KeyUsage = x509.KeyUsageCertSign
-
- // Setup the pool
- tlsConfig.RootCAs.AddCert(tlsRemoteCert)
-
- // Set the ServerName
- if tlsRemoteCert.DNSNames != nil {
- tlsConfig.ServerName = tlsRemoteCert.DNSNames[0]
- }
- }
-
- tlsConfig.BuildNameToCertificate()
-}
-
-func GetTLSConfig(tlsClientCertFile string, tlsClientKeyFile string, tlsClientCAFile string, tlsRemoteCert *x509.Certificate) (*tls.Config, error) {
- tlsConfig := InitTLSConfig()
-
- // Client authentication
- if tlsClientCertFile != "" && tlsClientKeyFile != "" {
- cert, err := tls.LoadX509KeyPair(tlsClientCertFile, tlsClientKeyFile)
- if err != nil {
- return nil, err
- }
-
- tlsConfig.Certificates = []tls.Certificate{cert}
- }
-
- if tlsClientCAFile != "" {
- caCertificates, err := ioutil.ReadFile(tlsClientCAFile)
- if err != nil {
- return nil, err
- }
-
- caPool := x509.NewCertPool()
- caPool.AppendCertsFromPEM(caCertificates)
-
- tlsConfig.RootCAs = caPool
- }
-
- finalizeTLSConfig(tlsConfig, tlsRemoteCert)
- return tlsConfig, nil
-}
-
-func GetTLSConfigMem(tlsClientCert string, tlsClientKey string, tlsClientCA string, tlsRemoteCertPEM string, insecureSkipVerify bool) (*tls.Config, error) {
- tlsConfig := InitTLSConfig()
- tlsConfig.InsecureSkipVerify = insecureSkipVerify
- // Client authentication
- if tlsClientCert != "" && tlsClientKey != "" {
- cert, err := tls.X509KeyPair([]byte(tlsClientCert), []byte(tlsClientKey))
- if err != nil {
- return nil, err
- }
-
- tlsConfig.Certificates = []tls.Certificate{cert}
- }
-
- var tlsRemoteCert *x509.Certificate
- if tlsRemoteCertPEM != "" {
- // Ignore any content outside of the PEM bytes we care about
- certBlock, _ := pem.Decode([]byte(tlsRemoteCertPEM))
- if certBlock == nil {
- return nil, fmt.Errorf("Invalid remote certificate")
- }
-
- var err error
- tlsRemoteCert, err = x509.ParseCertificate(certBlock.Bytes)
- if err != nil {
- return nil, err
- }
- }
-
- if tlsClientCA != "" {
- caPool := x509.NewCertPool()
- caPool.AppendCertsFromPEM([]byte(tlsClientCA))
-
- tlsConfig.RootCAs = caPool
- }
-
- finalizeTLSConfig(tlsConfig, tlsRemoteCert)
-
- return tlsConfig, nil
-}
-
-func IsLoopback(iface *net.Interface) bool {
- return int(iface.Flags&net.FlagLoopback) > 0
-}
-
-func WebsocketSendStream(conn *websocket.Conn, r io.Reader, bufferSize int) chan bool {
- ch := make(chan bool)
-
- if r == nil {
- close(ch)
- return ch
- }
-
- go func(conn *websocket.Conn, r io.Reader) {
- in := ReaderToChannel(r, bufferSize)
- for {
- buf, ok := <-in
- if !ok {
- break
- }
-
- w, err := conn.NextWriter(websocket.BinaryMessage)
- if err != nil {
- logger.Debugf("Got error getting next writer %s", err)
- break
- }
-
- _, err = w.Write(buf)
- w.Close()
- if err != nil {
- logger.Debugf("Got err writing %s", err)
- break
- }
- }
- conn.WriteMessage(websocket.TextMessage, []byte{})
- ch <- true
- }(conn, r)
-
- return ch
-}
-
-func WebsocketRecvStream(w io.Writer, conn *websocket.Conn) chan bool {
- ch := make(chan bool)
-
- go func(w io.Writer, conn *websocket.Conn) {
- for {
- mt, r, err := conn.NextReader()
- if mt == websocket.CloseMessage {
- logger.Debugf("Got close message for reader")
- break
- }
-
- if mt == websocket.TextMessage {
- logger.Debugf("got message barrier")
- break
- }
-
- if err != nil {
- logger.Debugf("Got error getting next reader %s, %s", err, w)
- break
- }
-
- buf, err := ioutil.ReadAll(r)
- if err != nil {
-				logger.Debugf("Got error reading from reader %s", err)
- break
- }
-
- if w == nil {
- continue
- }
-
- i, err := w.Write(buf)
- if i != len(buf) {
- logger.Debugf("Didn't write all of buf")
- break
- }
- if err != nil {
- logger.Debugf("Error writing buf %s", err)
- break
- }
- }
- ch <- true
- }(w, conn)
-
- return ch
-}
-
-func WebsocketProxy(source *websocket.Conn, target *websocket.Conn) chan bool {
- forward := func(in *websocket.Conn, out *websocket.Conn, ch chan bool) {
- for {
- mt, r, err := in.NextReader()
- if err != nil {
- break
- }
-
- w, err := out.NextWriter(mt)
- if err != nil {
- break
- }
-
- _, err = io.Copy(w, r)
- w.Close()
- if err != nil {
- break
- }
- }
-
- ch <- true
- }
-
- chSend := make(chan bool)
- go forward(source, target, chSend)
-
- chRecv := make(chan bool)
- go forward(target, source, chRecv)
-
- ch := make(chan bool)
- go func() {
- select {
- case <-chSend:
- case <-chRecv:
- }
-
- source.Close()
- target.Close()
-
- ch <- true
- }()
-
- return ch
-}
-
-func defaultReader(conn *websocket.Conn, r io.ReadCloser, readDone chan<- bool) {
- /* For now, we don't need to adjust buffer sizes in
- * WebsocketMirror, since it's used for interactive things like
- * exec.
- */
- in := ReaderToChannel(r, -1)
- for {
- buf, ok := <-in
- if !ok {
- r.Close()
- logger.Debugf("sending write barrier")
- conn.WriteMessage(websocket.TextMessage, []byte{})
- readDone <- true
- return
- }
- w, err := conn.NextWriter(websocket.BinaryMessage)
- if err != nil {
- logger.Debugf("Got error getting next writer %s", err)
- break
- }
-
- _, err = w.Write(buf)
- w.Close()
- if err != nil {
- logger.Debugf("Got err writing %s", err)
- break
- }
- }
- closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
- conn.WriteMessage(websocket.CloseMessage, closeMsg)
- readDone <- true
- r.Close()
-}
-
-func DefaultWriter(conn *websocket.Conn, w io.WriteCloser, writeDone chan<- bool) {
- for {
- mt, r, err := conn.NextReader()
- if err != nil {
- logger.Debugf("Got error getting next reader %s", err)
- break
- }
-
- if mt == websocket.CloseMessage {
- logger.Debugf("Got close message for reader")
- break
- }
-
- if mt == websocket.TextMessage {
- logger.Debugf("Got message barrier, resetting stream")
- break
- }
-
- buf, err := ioutil.ReadAll(r)
- if err != nil {
-			logger.Debugf("Got error reading from reader %s", err)
- break
- }
- i, err := w.Write(buf)
- if i != len(buf) {
- logger.Debugf("Didn't write all of buf")
- break
- }
- if err != nil {
- logger.Debugf("Error writing buf %s", err)
- break
- }
- }
- writeDone <- true
- w.Close()
-}
-
-// WebsocketIO is a wrapper implementing ReadWriteCloser on top of websocket
-type WebsocketIO struct {
- Conn *websocket.Conn
- reader io.Reader
- mu sync.Mutex
-}
-
-func (w *WebsocketIO) Read(p []byte) (n int, err error) {
- for {
- // First read from this message
- if w.reader == nil {
- var mt int
-
- mt, w.reader, err = w.Conn.NextReader()
- if err != nil {
- return -1, err
- }
-
- if mt == websocket.CloseMessage {
- return 0, io.EOF
- }
-
- if mt == websocket.TextMessage {
- return 0, io.EOF
- }
- }
-
- // Perform the read itself
- n, err := w.reader.Read(p)
- if err == io.EOF {
- // At the end of the message, reset reader
- w.reader = nil
- return n, nil
- }
-
- if err != nil {
- return -1, err
- }
-
- return n, nil
- }
-}
-
-func (w *WebsocketIO) Write(p []byte) (n int, err error) {
- w.mu.Lock()
- defer w.mu.Unlock()
- wr, err := w.Conn.NextWriter(websocket.BinaryMessage)
- if err != nil {
- return -1, err
- }
- defer wr.Close()
-
- n, err = wr.Write(p)
- if err != nil {
- return -1, err
- }
-
- return n, nil
-}
-
-// Close sends a control message indicating the stream is finished, but it does not actually close
-// the socket.
-func (w *WebsocketIO) Close() error {
- w.mu.Lock()
- defer w.mu.Unlock()
- // Target expects to get a control message indicating stream is finished.
- return w.Conn.WriteMessage(websocket.TextMessage, []byte{})
-}
-
-// WebsocketMirror allows mirroring a reader to a websocket and taking the
-// result and writing it to a writer. This function allows for multiple
-// mirrorings and correctly negotiates stream endings. However, it means any
-// websocket.Conns passed to it are live when it returns, and must be closed
-// explicitly.
-type WebSocketMirrorReader func(conn *websocket.Conn, r io.ReadCloser, readDone chan<- bool)
-type WebSocketMirrorWriter func(conn *websocket.Conn, w io.WriteCloser, writeDone chan<- bool)
-
-func WebsocketMirror(conn *websocket.Conn, w io.WriteCloser, r io.ReadCloser, Reader WebSocketMirrorReader, Writer WebSocketMirrorWriter) (chan bool, chan bool) {
- readDone := make(chan bool, 1)
- writeDone := make(chan bool, 1)
-
- ReadFunc := Reader
- if ReadFunc == nil {
- ReadFunc = defaultReader
- }
-
- WriteFunc := Writer
- if WriteFunc == nil {
- WriteFunc = DefaultWriter
- }
-
- go ReadFunc(conn, r, readDone)
- go WriteFunc(conn, w, writeDone)
-
- return readDone, writeDone
-}
-
-func WebsocketConsoleMirror(conn *websocket.Conn, w io.WriteCloser, r io.ReadCloser) (chan bool, chan bool) {
- readDone := make(chan bool, 1)
- writeDone := make(chan bool, 1)
-
- go DefaultWriter(conn, w, writeDone)
-
- go func(conn *websocket.Conn, r io.ReadCloser) {
- in := ReaderToChannel(r, -1)
- for {
- buf, ok := <-in
- if !ok {
- r.Close()
- logger.Debugf("sending write barrier")
- conn.WriteMessage(websocket.TextMessage, []byte{})
- readDone <- true
- return
- }
- w, err := conn.NextWriter(websocket.BinaryMessage)
- if err != nil {
- logger.Debugf("Got error getting next writer %s", err)
- break
- }
-
- _, err = w.Write(buf)
- w.Close()
- if err != nil {
- logger.Debugf("Got err writing %s", err)
- break
- }
- }
- closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
- conn.WriteMessage(websocket.CloseMessage, closeMsg)
- readDone <- true
- r.Close()
- }(conn, r)
-
- return readDone, writeDone
-}
-
-var WebsocketUpgrader = websocket.Upgrader{
- CheckOrigin: func(r *http.Request) bool { return true },
-}
-
-// AllocatePort asks the kernel for a free open port that is ready to use
-func AllocatePort() (int, error) {
- addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
- if err != nil {
- return -1, err
- }
-
- l, err := net.ListenTCP("tcp", addr)
- if err != nil {
- return -1, err
- }
- defer l.Close()
- return l.Addr().(*net.TCPAddr).Port, nil
-}
-
-func NetworkGetCounters(ifName string) api.NetworkStateCounters {
- counters := api.NetworkStateCounters{}
- // Get counters
- content, err := ioutil.ReadFile("/proc/net/dev")
- if err == nil {
- for _, line := range strings.Split(string(content), "\n") {
- fields := strings.Fields(line)
-
- if len(fields) != 17 {
- continue
- }
-
- intName := strings.TrimSuffix(fields[0], ":")
- if intName != ifName {
- continue
- }
-
- rxBytes, err := strconv.ParseInt(fields[1], 10, 64)
- if err != nil {
- continue
- }
-
- rxPackets, err := strconv.ParseInt(fields[2], 10, 64)
- if err != nil {
- continue
- }
-
- txBytes, err := strconv.ParseInt(fields[9], 10, 64)
- if err != nil {
- continue
- }
-
- txPackets, err := strconv.ParseInt(fields[10], 10, 64)
- if err != nil {
- continue
- }
-
- counters.BytesSent = txBytes
- counters.BytesReceived = rxBytes
- counters.PacketsSent = txPackets
- counters.PacketsReceived = rxPackets
- break
- }
- }
-
- return counters
-}
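AllocatePort uses the usual bind-to-port-zero trick: the kernel assigns a free port, which is read back from the listener before closing it. The same technique via net.Listen:

```go
package main

import (
	"fmt"
	"net"
)

// freePort asks the kernel for an unused TCP port by binding port 0,
// the same technique as the removed AllocatePort.
func freePort() (int, error) {
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		return -1, err
	}
	defer l.Close()
	return l.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	port, err := freePort()
	if err != nil {
		panic(err)
	}
	fmt.Println("allocated port:", port)
	// Note: the port is only reserved while the listener is open, so there
	// is an inherent race between closing it and the caller binding it.
}
```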
diff --git a/vendor/github.com/lxc/lxd/shared/network_unix.go b/vendor/github.com/lxc/lxd/shared/network_unix.go
deleted file mode 100644
index 1e5cdc7cca72..000000000000
--- a/vendor/github.com/lxc/lxd/shared/network_unix.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build !windows
-
-package shared
-
-import (
- "crypto/x509"
- "io/ioutil"
-)
-
-func systemCertPool() (*x509.CertPool, error) {
- // Get the system pool
- pool, err := x509.SystemCertPool()
- if err != nil {
- return nil, err
- }
-
- // Attempt to load the system's pool too (for snaps)
- if PathExists("/var/lib/snapd/hostfs/etc/ssl/certs/ca-certificates.crt") {
- snapCerts, err := ioutil.ReadFile("/var/lib/snapd/hostfs/etc/ssl/certs/ca-certificates.crt")
- if err == nil {
- pool.AppendCertsFromPEM(snapCerts)
- }
- }
-
- return pool, nil
-}
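The Unix variant simply augments x509.SystemCertPool with an extra PEM bundle when one exists on disk. The same pattern with an arbitrary, purely illustrative path:

```go
package main

import (
	"crypto/x509"
	"fmt"
	"io/ioutil"
)

func main() {
	pool, err := x509.SystemCertPool()
	if err != nil {
		panic(err)
	}

	// Append an extra CA bundle if present; a missing file is ignored, just
	// as the removed systemCertPool ignores a missing snap host path.
	if pem, err := ioutil.ReadFile("/tmp/extra-ca.crt"); err == nil {
		if !pool.AppendCertsFromPEM(pem) {
			fmt.Println("no certificates parsed from bundle")
		}
	}

	fmt.Println("pool ready:", pool != nil)
}
```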
diff --git a/vendor/github.com/lxc/lxd/shared/network_windows.go b/vendor/github.com/lxc/lxd/shared/network_windows.go
deleted file mode 100644
index e07aa2782461..000000000000
--- a/vendor/github.com/lxc/lxd/shared/network_windows.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// +build windows
-
-package shared
-
-import (
- "crypto/x509"
- "fmt"
- "sync"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-var once sync.Once
-var systemRoots *x509.CertPool
-
-func systemCertPool() (*x509.CertPool, error) {
- once.Do(initSystemRoots)
- if systemRoots == nil {
- return nil, fmt.Errorf("Bad system root pool")
- }
- return systemRoots, nil
-}
-
-func initSystemRoots() {
- const CRYPT_E_NOT_FOUND = 0x80092004
-
- store, err := windows.CertOpenSystemStore(0, windows.StringToUTF16Ptr("ROOT"))
- if err != nil {
- systemRoots = nil
- return
- }
- defer windows.CertCloseStore(store, 0)
-
- roots := x509.NewCertPool()
- var cert *windows.CertContext
- for {
- cert, err = windows.CertEnumCertificatesInStore(store, cert)
- if err != nil {
- if errno, ok := err.(windows.Errno); ok {
- if errno == CRYPT_E_NOT_FOUND {
- break
- }
- }
- systemRoots = nil
- return
- }
- if cert == nil {
- break
- }
- // Copy the buf, since ParseCertificate does not create its own copy.
- buf := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:]
- buf2 := make([]byte, cert.Length)
- copy(buf2, buf)
- if c, err := x509.ParseCertificate(buf2); err == nil {
- roots.AddCert(c)
- }
- }
- systemRoots = roots
-}
diff --git a/vendor/github.com/lxc/lxd/shared/proxy.go b/vendor/github.com/lxc/lxd/shared/proxy.go
deleted file mode 100644
index 56bb19a9e8d2..000000000000
--- a/vendor/github.com/lxc/lxd/shared/proxy.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package shared
-
-import (
- "fmt"
- "net"
- "net/http"
- "net/url"
- "os"
- "strings"
- "sync"
-)
-
-var (
- httpProxyEnv = &envOnce{
- names: []string{"HTTP_PROXY", "http_proxy"},
- }
- httpsProxyEnv = &envOnce{
- names: []string{"HTTPS_PROXY", "https_proxy"},
- }
- noProxyEnv = &envOnce{
- names: []string{"NO_PROXY", "no_proxy"},
- }
-)
-
-type envOnce struct {
- names []string
- once sync.Once
- val string
-}
-
-func (e *envOnce) Get() string {
- e.once.Do(e.init)
- return e.val
-}
-
-func (e *envOnce) init() {
- for _, n := range e.names {
- e.val = os.Getenv(n)
- if e.val != "" {
- return
- }
- }
-}
-
-// This is basically the same as golang's ProxyFromEnvironment, except it
-// doesn't fall back to http_proxy when https_proxy isn't around, which is
-// incorrect behavior. It still respects HTTP_PROXY, HTTPS_PROXY, and NO_PROXY.
-func ProxyFromEnvironment(req *http.Request) (*url.URL, error) {
- return ProxyFromConfig("", "", "")(req)
-}
-
-func ProxyFromConfig(httpsProxy string, httpProxy string, noProxy string) func(req *http.Request) (*url.URL, error) {
- return func(req *http.Request) (*url.URL, error) {
- var proxy, port string
- var err error
-
- switch req.URL.Scheme {
- case "https":
- proxy = httpsProxy
- if proxy == "" {
- proxy = httpsProxyEnv.Get()
- }
- port = ":443"
- case "http":
- proxy = httpProxy
- if proxy == "" {
- proxy = httpProxyEnv.Get()
- }
- port = ":80"
- default:
- return nil, fmt.Errorf("unknown scheme %s", req.URL.Scheme)
- }
-
- if proxy == "" {
- return nil, nil
- }
-
- addr := req.URL.Host
- if !hasPort(addr) {
- addr = addr + port
- }
-
- use, err := useProxy(addr, noProxy)
- if err != nil {
- return nil, err
- }
- if !use {
- return nil, nil
- }
-
- proxyURL, err := url.Parse(proxy)
- if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") {
- // proxy was bogus. Try prepending "http://" to it and
- // see if that parses correctly. If not, we fall
- // through and complain about the original one.
- if proxyURL, err := url.Parse("http://" + proxy); err == nil {
- return proxyURL, nil
- }
- }
- if err != nil {
- return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
- }
- return proxyURL, nil
- }
-}
-
-func hasPort(s string) bool {
- return strings.LastIndex(s, ":") > strings.LastIndex(s, "]")
-}
-
-func useProxy(addr string, noProxy string) (bool, error) {
- if noProxy == "" {
- noProxy = noProxyEnv.Get()
- }
-
- if len(addr) == 0 {
- return true, nil
- }
- host, _, err := net.SplitHostPort(addr)
- if err != nil {
- return false, nil
- }
- if host == "localhost" {
- return false, nil
- }
- if ip := net.ParseIP(host); ip != nil {
- if ip.IsLoopback() {
- return false, nil
- }
- }
-
- if noProxy == "*" {
- return false, nil
- }
-
- addr = strings.ToLower(strings.TrimSpace(addr))
- if hasPort(addr) {
- addr = addr[:strings.LastIndex(addr, ":")]
- }
-
- for _, p := range strings.Split(noProxy, ",") {
- p = strings.ToLower(strings.TrimSpace(p))
- if len(p) == 0 {
- continue
- }
- if hasPort(p) {
- p = p[:strings.LastIndex(p, ":")]
- }
- if addr == p {
- return false, nil
- }
- if p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:]) {
- // noProxy ".foo.com" matches "bar.foo.com" or "foo.com"
- return false, nil
- }
- if p[0] != '.' && strings.HasSuffix(addr, p) && addr[len(addr)-len(p)-1] == '.' {
- // noProxy "foo.com" matches "bar.foo.com"
- return false, nil
- }
- }
- return true, nil
-}
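The subtle part of useProxy is the pair of suffix rules at the end: a ".foo.com" entry matches "bar.foo.com" and bare "foo.com", while a "foo.com" entry matches subdomains only across a dot boundary. Those rules in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

// matchesNoProxy applies the two suffix rules from the removed useProxy.
// Both addr and p are assumed lowercase, trimmed, and port-free.
func matchesNoProxy(addr, p string) bool {
	if addr == p {
		return true
	}
	if p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:]) {
		return true // ".foo.com" matches "bar.foo.com" and "foo.com"
	}
	if p[0] != '.' && strings.HasSuffix(addr, p) && addr[len(addr)-len(p)-1] == '.' {
		return true // "foo.com" matches "bar.foo.com" but not "notfoo.com"
	}
	return false
}

func main() {
	fmt.Println(matchesNoProxy("bar.foo.com", ".foo.com")) // true
	fmt.Println(matchesNoProxy("foo.com", ".foo.com"))     // true
	fmt.Println(matchesNoProxy("bar.foo.com", "foo.com"))  // true
	fmt.Println(matchesNoProxy("notfoo.com", "foo.com"))   // false
}
```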
diff --git a/vendor/github.com/lxc/lxd/shared/units/units.go b/vendor/github.com/lxc/lxd/shared/units/units.go
deleted file mode 100644
index 23514c9c3071..000000000000
--- a/vendor/github.com/lxc/lxd/shared/units/units.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package units
-
-import (
- "fmt"
- "strconv"
-)
-
-// ParseByteSizeString parses a human representation of an amount of
-// data into a number of bytes
-func ParseByteSizeString(input string) (int64, error) {
- // Empty input
- if input == "" {
- return 0, nil
- }
-
- // Find where the suffix begins
- suffixLen := 0
- for i, chr := range []byte(input) {
- _, err := strconv.Atoi(string([]byte{chr}))
- if err != nil {
- suffixLen = len(input) - i
- break
- }
- }
-
- if suffixLen == len(input) {
- return -1, fmt.Errorf("Invalid value: %s", input)
- }
-
- // Extract the suffix
- suffix := input[len(input)-suffixLen:]
-
- // Extract the value
- value := input[0 : len(input)-suffixLen]
- valueInt, err := strconv.ParseInt(value, 10, 64)
- if err != nil {
- return -1, fmt.Errorf("Invalid integer: %s", input)
- }
-
- // Figure out the multiplicator
- multiplicator := int64(0)
- switch suffix {
- case "", "B", " bytes":
- multiplicator = 1
- case "kB":
- multiplicator = 1000
- case "MB":
- multiplicator = 1000 * 1000
- case "GB":
- multiplicator = 1000 * 1000 * 1000
- case "TB":
- multiplicator = 1000 * 1000 * 1000 * 1000
- case "PB":
- multiplicator = 1000 * 1000 * 1000 * 1000 * 1000
- case "EB":
- multiplicator = 1000 * 1000 * 1000 * 1000 * 1000 * 1000
- case "KiB":
- multiplicator = 1024
- case "MiB":
- multiplicator = 1024 * 1024
- case "GiB":
- multiplicator = 1024 * 1024 * 1024
- case "TiB":
- multiplicator = 1024 * 1024 * 1024 * 1024
- case "PiB":
- multiplicator = 1024 * 1024 * 1024 * 1024 * 1024
- case "EiB":
- multiplicator = 1024 * 1024 * 1024 * 1024 * 1024 * 1024
- default:
- return -1, fmt.Errorf("Invalid value: %s", input)
- }
-
- return valueInt * multiplicator, nil
-}
-
-// ParseBitSizeString parses a human representation of an amount of
-// data into a number of bits
-func ParseBitSizeString(input string) (int64, error) {
- // Empty input
- if input == "" {
- return 0, nil
- }
-
- // Find where the suffix begins
- suffixLen := 0
- for i, chr := range []byte(input) {
- _, err := strconv.Atoi(string([]byte{chr}))
- if err != nil {
- suffixLen = len(input) - i
- break
- }
- }
-
- if suffixLen == len(input) {
- return -1, fmt.Errorf("Invalid value: %s", input)
- }
-
- // Extract the suffix
- suffix := input[len(input)-suffixLen:]
-
- // Extract the value
- value := input[0 : len(input)-suffixLen]
- valueInt, err := strconv.ParseInt(value, 10, 64)
- if err != nil {
- return -1, fmt.Errorf("Invalid integer: %s", input)
- }
-
- // Figure out the multiplicator
- multiplicator := int64(0)
- switch suffix {
- case "", "bit":
- multiplicator = 1
- case "kbit":
- multiplicator = 1000
- case "Mbit":
- multiplicator = 1000 * 1000
- case "Gbit":
- multiplicator = 1000 * 1000 * 1000
- case "Tbit":
- multiplicator = 1000 * 1000 * 1000 * 1000
- case "Pbit":
- multiplicator = 1000 * 1000 * 1000 * 1000 * 1000
- case "Ebit":
- multiplicator = 1000 * 1000 * 1000 * 1000 * 1000 * 1000
- case "Kibit":
- multiplicator = 1024
- case "Mibit":
- multiplicator = 1024 * 1024
- case "Gibit":
- multiplicator = 1024 * 1024 * 1024
- case "Tibit":
- multiplicator = 1024 * 1024 * 1024 * 1024
- case "Pibit":
- multiplicator = 1024 * 1024 * 1024 * 1024 * 1024
- case "Eibit":
- multiplicator = 1024 * 1024 * 1024 * 1024 * 1024 * 1024
-
- default:
- return -1, fmt.Errorf("Unsupported suffix: %s", suffix)
- }
-
- return valueInt * multiplicator, nil
-}
-
-// GetByteSizeString takes a number of bytes and precision and returns a
-// human representation of the amount of data
-func GetByteSizeString(input int64, precision uint) string {
- if input < 1000 {
- return fmt.Sprintf("%dB", input)
- }
-
- value := float64(input)
-
- for _, unit := range []string{"kB", "MB", "GB", "TB", "PB", "EB"} {
- value = value / 1000
- if value < 1000 {
- return fmt.Sprintf("%.*f%s", precision, value, unit)
- }
- }
-
- return fmt.Sprintf("%.*fEB", precision, value)
-}
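Both parsers share one shape: scan to the first non-digit, parse the numeric prefix, then map the suffix to a multiplier, keeping SI decimal (kB = 1000) distinct from IEC binary (KiB = 1024). A condensed sketch covering only a few suffixes:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSize condenses the removed ParseByteSizeString: split the numeric
// prefix from the unit suffix, then apply a multiplier. Only a handful of
// suffixes are included here for brevity.
func parseSize(s string) (int64, error) {
	mult := map[string]int64{
		"": 1, "B": 1, "kB": 1000, "MB": 1000 * 1000,
		"KiB": 1024, "MiB": 1024 * 1024,
	}
	i := strings.IndexFunc(s, func(r rune) bool { return r < '0' || r > '9' })
	if i < 0 {
		i = len(s) // no suffix, plain bytes
	}
	v, err := strconv.ParseInt(s[:i], 10, 64)
	if err != nil {
		return -1, fmt.Errorf("invalid integer: %s", s)
	}
	m, ok := mult[strings.TrimSpace(s[i:])]
	if !ok {
		return -1, fmt.Errorf("unsupported suffix: %s", s[i:])
	}
	return v * m, nil
}

func main() {
	fmt.Println(parseSize("3MB"))  // 3000000 <nil>
	fmt.Println(parseSize("3MiB")) // 3145728 <nil>
}
```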
diff --git a/vendor/github.com/lxc/lxd/shared/util.go b/vendor/github.com/lxc/lxd/shared/util.go
deleted file mode 100644
index e307f985fea2..000000000000
--- a/vendor/github.com/lxc/lxd/shared/util.go
+++ /dev/null
@@ -1,1115 +0,0 @@
-package shared
-
-import (
- "bufio"
- "bytes"
- "crypto/rand"
- "encoding/gob"
- "encoding/hex"
- "encoding/json"
- "fmt"
- "hash"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "reflect"
- "regexp"
- "runtime"
- "strconv"
- "strings"
- "time"
-
- "github.com/flosch/pongo2"
- "github.com/pkg/errors"
-
- "github.com/lxc/lxd/shared/cancel"
- "github.com/lxc/lxd/shared/ioprogress"
- "github.com/lxc/lxd/shared/units"
-)
-
-const SnapshotDelimiter = "/"
-const DefaultPort = "8443"
-
-// URLEncode encodes a path and query parameters to a URL.
-func URLEncode(path string, query map[string]string) (string, error) {
- u, err := url.Parse(path)
- if err != nil {
- return "", err
- }
-
- params := url.Values{}
- for key, value := range query {
- params.Add(key, value)
- }
- u.RawQuery = params.Encode()
- return u.String(), nil
-}
-
-// AddSlash adds a slash to the end of paths if they don't already have one.
-// This can be useful for rsyncing things, since rsync behaves differently
-// depending on the presence or absence of a trailing slash.
-func AddSlash(path string) string {
- if path[len(path)-1] != '/' {
- return path + "/"
- }
-
- return path
-}
-
-func PathExists(name string) bool {
- _, err := os.Lstat(name)
- if err != nil && os.IsNotExist(err) {
- return false
- }
- return true
-}
-
-// PathIsEmpty checks if the given path is empty.
-func PathIsEmpty(path string) (bool, error) {
- f, err := os.Open(path)
- if err != nil {
- return false, err
- }
- defer f.Close()
-
- // read in ONLY one file
- _, err = f.Readdir(1)
-
- // and if the file is EOF... well, the dir is empty.
- if err == io.EOF {
- return true, nil
- }
- return false, err
-}
-
-// IsDir returns true if the given path is a directory.
-func IsDir(name string) bool {
- stat, err := os.Stat(name)
- if err != nil {
- return false
- }
- return stat.IsDir()
-}
-
-// IsUnixSocket returns true if the given path is either a Unix socket
-// or a symbolic link pointing at a Unix socket.
-func IsUnixSocket(path string) bool {
- stat, err := os.Stat(path)
- if err != nil {
- return false
- }
- return (stat.Mode() & os.ModeSocket) == os.ModeSocket
-}
-
-// HostPath returns the host path for the provided path
-// On a normal system, this does nothing
-// When inside of a snap environment, returns the real path
-func HostPath(path string) string {
- // Ignore empty paths
- if len(path) == 0 {
- return path
- }
-
- // Don't prefix stdin/stdout
- if path == "-" {
- return path
- }
-
- // Check if we're running in a snap package
- _, inSnap := os.LookupEnv("SNAP")
- snapName := os.Getenv("SNAP_NAME")
- if !inSnap || snapName != "lxd" {
- return path
- }
-
- // Handle relative paths
- if path[0] != os.PathSeparator {
- // Use the cwd of the parent as snap-confine alters our own cwd on launch
- ppid := os.Getppid()
- if ppid < 1 {
- return path
- }
-
- pwd, err := os.Readlink(fmt.Sprintf("/proc/%d/cwd", ppid))
- if err != nil {
- return path
- }
-
- path = filepath.Clean(strings.Join([]string{pwd, path}, string(os.PathSeparator)))
- }
-
- // Check if the path is already snap-aware
- for _, prefix := range []string{"/dev", "/snap", "/var/snap", "/var/lib/snapd"} {
- if path == prefix || strings.HasPrefix(path, fmt.Sprintf("%s/", prefix)) {
- return path
- }
- }
-
- return fmt.Sprintf("/var/lib/snapd/hostfs%s", path)
-}
-
-// VarPath returns the provided path elements joined by a slash and
-// appended to the end of $LXD_DIR, which defaults to /var/lib/lxd.
-func VarPath(path ...string) string {
- varDir := os.Getenv("LXD_DIR")
- if varDir == "" {
- varDir = "/var/lib/lxd"
- }
-
- items := []string{varDir}
- items = append(items, path...)
- return filepath.Join(items...)
-}
-
-// CachePath returns the directory that LXD should put its cache under. If LXD_DIR is
-// set, this path is $LXD_DIR/cache, otherwise it is /var/cache/lxd.
-func CachePath(path ...string) string {
- varDir := os.Getenv("LXD_DIR")
- logDir := "/var/cache/lxd"
- if varDir != "" {
- logDir = filepath.Join(varDir, "cache")
- }
- items := []string{logDir}
- items = append(items, path...)
- return filepath.Join(items...)
-}
-
-// LogPath returns the directory that LXD should put logs under. If LXD_DIR is
-// set, this path is $LXD_DIR/logs, otherwise it is /var/log/lxd.
-func LogPath(path ...string) string {
- varDir := os.Getenv("LXD_DIR")
- logDir := "/var/log/lxd"
- if varDir != "" {
- logDir = filepath.Join(varDir, "logs")
- }
- items := []string{logDir}
- items = append(items, path...)
- return filepath.Join(items...)
-}
-
-func ParseLXDFileHeaders(headers http.Header) (uid int64, gid int64, mode int, type_ string, write string) {
- uid, err := strconv.ParseInt(headers.Get("X-LXD-uid"), 10, 64)
- if err != nil {
- uid = -1
- }
-
- gid, err = strconv.ParseInt(headers.Get("X-LXD-gid"), 10, 64)
- if err != nil {
- gid = -1
- }
-
- mode, err = strconv.Atoi(headers.Get("X-LXD-mode"))
- if err != nil {
- mode = -1
- } else {
- rawMode, err := strconv.ParseInt(headers.Get("X-LXD-mode"), 0, 0)
- if err == nil {
- mode = int(os.FileMode(rawMode) & os.ModePerm)
- }
- }
-
- type_ = headers.Get("X-LXD-type")
- /* backwards compat: before "type" was introduced, we could only
- * manipulate files
- */
- if type_ == "" {
- type_ = "file"
- }
-
- write = headers.Get("X-LXD-write")
- /* backwards compat: before "write" was introduced, we could only
- * overwrite files
- */
- if write == "" {
- write = "overwrite"
- }
-
- return uid, gid, mode, type_, write
-}
-
-func ReadToJSON(r io.Reader, req interface{}) error {
- buf, err := ioutil.ReadAll(r)
- if err != nil {
- return err
- }
-
- return json.Unmarshal(buf, req)
-}
-
-func ReaderToChannel(r io.Reader, bufferSize int) <-chan []byte {
- if bufferSize <= 128*1024 {
- bufferSize = 128 * 1024
- }
-
- ch := make(chan ([]byte))
-
- go func() {
- readSize := 128 * 1024
- offset := 0
- buf := make([]byte, bufferSize)
-
- for {
- read := buf[offset : offset+readSize]
- nr, err := r.Read(read)
- offset += nr
- if offset > 0 && (offset+readSize >= bufferSize || err != nil) {
- ch <- buf[0:offset]
- offset = 0
- buf = make([]byte, bufferSize)
- }
-
- if err != nil {
- close(ch)
- break
- }
- }
- }()
-
- return ch
-}
-
-// RandomCryptoString returns a random hex-encoded string from crypto/rand.
-func RandomCryptoString() (string, error) {
- buf := make([]byte, 32)
- n, err := rand.Read(buf)
- if err != nil {
- return "", err
- }
-
- if n != len(buf) {
- return "", fmt.Errorf("not enough random bytes read")
- }
-
- return hex.EncodeToString(buf), nil
-}
-
-func SplitExt(fpath string) (string, string) {
- b := path.Base(fpath)
- ext := path.Ext(fpath)
- return b[:len(b)-len(ext)], ext
-}
-
-func AtoiEmptyDefault(s string, def int) (int, error) {
- if s == "" {
- return def, nil
- }
-
- return strconv.Atoi(s)
-}
-
-func ReadStdin() ([]byte, error) {
- buf := bufio.NewReader(os.Stdin)
- line, _, err := buf.ReadLine()
- if err != nil {
- return nil, err
- }
- return line, nil
-}
-
-func WriteAll(w io.Writer, data []byte) error {
- buf := bytes.NewBuffer(data)
-
- toWrite := int64(buf.Len())
- for {
- n, err := io.Copy(w, buf)
- if err != nil {
- return err
- }
-
- toWrite -= n
- if toWrite <= 0 {
- return nil
- }
- }
-}
-
-// FileMove tries to move a file by using os.Rename,
-// if that fails it tries to copy the file and remove the source.
-func FileMove(oldPath string, newPath string) error {
- err := os.Rename(oldPath, newPath)
- if err == nil {
- return nil
- }
-
- err = FileCopy(oldPath, newPath)
- if err != nil {
- return err
- }
-
- os.Remove(oldPath)
-
- return nil
-}
-
-// FileCopy copies a file, overwriting the target if it exists.
-func FileCopy(source string, dest string) error {
- fi, err := os.Lstat(source)
- if err != nil {
- return err
- }
-
- _, uid, gid := GetOwnerMode(fi)
-
- if fi.Mode()&os.ModeSymlink != 0 {
- target, err := os.Readlink(source)
- if err != nil {
- return err
- }
-
- if PathExists(dest) {
- err = os.Remove(dest)
- if err != nil {
- return err
- }
- }
-
- err = os.Symlink(target, dest)
- if err != nil {
- return err
- }
-
- if runtime.GOOS != "windows" {
- return os.Lchown(dest, uid, gid)
- }
-
- return nil
- }
-
- s, err := os.Open(source)
- if err != nil {
- return err
- }
- defer s.Close()
-
- d, err := os.Create(dest)
- if err != nil {
- if os.IsExist(err) {
- d, err = os.OpenFile(dest, os.O_WRONLY, fi.Mode())
- if err != nil {
- return err
- }
- } else {
- return err
- }
- }
- defer d.Close()
-
- _, err = io.Copy(d, s)
- if err != nil {
- return err
- }
-
- /* chown not supported on windows */
- if runtime.GOOS != "windows" {
- return d.Chown(uid, gid)
- }
-
- return nil
-}
-
-// DirCopy copies a directory recursively, overwriting the target if it exists.
-func DirCopy(source string, dest string) error {
- // Get info about source.
- info, err := os.Stat(source)
- if err != nil {
- return errors.Wrapf(err, "failed to get source directory info")
- }
-
- if !info.IsDir() {
- return fmt.Errorf("source is not a directory")
- }
-
- // Remove dest if it already exists.
- if PathExists(dest) {
- err := os.RemoveAll(dest)
- if err != nil {
- return errors.Wrapf(err, "failed to remove destination directory %s", dest)
- }
- }
-
- // Create dest.
- err = os.MkdirAll(dest, info.Mode())
- if err != nil {
- return errors.Wrapf(err, "failed to create destination directory %s", dest)
- }
-
- // Copy all files.
- entries, err := ioutil.ReadDir(source)
- if err != nil {
- return errors.Wrapf(err, "failed to read source directory %s", source)
- }
-
- for _, entry := range entries {
-
- sourcePath := filepath.Join(source, entry.Name())
- destPath := filepath.Join(dest, entry.Name())
-
- if entry.IsDir() {
- err := DirCopy(sourcePath, destPath)
- if err != nil {
- return errors.Wrapf(err, "failed to copy sub-directory from %s to %s", sourcePath, destPath)
- }
- } else {
- err := FileCopy(sourcePath, destPath)
- if err != nil {
- return errors.Wrapf(err, "failed to copy file from %s to %s", sourcePath, destPath)
- }
- }
-
- }
-
- return nil
-}
-
-type BytesReadCloser struct {
- Buf *bytes.Buffer
-}
-
-func (r BytesReadCloser) Read(b []byte) (n int, err error) {
- return r.Buf.Read(b)
-}
-
-func (r BytesReadCloser) Close() error {
- /* no-op since we're in memory */
- return nil
-}
-
-func IsSnapshot(name string) bool {
- return strings.Contains(name, SnapshotDelimiter)
-}
-
-func MkdirAllOwner(path string, perm os.FileMode, uid int, gid int) error {
- // This function is a slightly modified version of MkdirAll from the Go standard library.
- // https://golang.org/src/os/path.go?s=488:535#L9
-
- // Fast path: if we can tell whether path is a directory or file, stop with success or error.
- dir, err := os.Stat(path)
- if err == nil {
- if dir.IsDir() {
- return nil
- }
- return fmt.Errorf("path exists but isn't a directory")
- }
-
- // Slow path: make sure parent exists and then call Mkdir for path.
- i := len(path)
- for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
- i--
- }
-
- j := i
- for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
- j--
- }
-
- if j > 1 {
- // Create parent
- err = MkdirAllOwner(path[0:j-1], perm, uid, gid)
- if err != nil {
- return err
- }
- }
-
- // Parent now exists; invoke Mkdir and use its result.
- err = os.Mkdir(path, perm)
-
- err_chown := os.Chown(path, uid, gid)
- if err_chown != nil {
- return err_chown
- }
-
- if err != nil {
- // Handle arguments like "foo/." by
- // double-checking that directory doesn't exist.
- dir, err1 := os.Lstat(path)
- if err1 == nil && dir.IsDir() {
- return nil
- }
- return err
- }
- return nil
-}
-
-func StringInSlice(key string, list []string) bool {
- for _, entry := range list {
- if entry == key {
- return true
- }
- }
- return false
-}
-
-func IntInSlice(key int, list []int) bool {
- for _, entry := range list {
- if entry == key {
- return true
- }
- }
- return false
-}
-
-func Int64InSlice(key int64, list []int64) bool {
- for _, entry := range list {
- if entry == key {
- return true
- }
- }
- return false
-}
-
-func IsTrue(value string) bool {
- if StringInSlice(strings.ToLower(value), []string{"true", "1", "yes", "on"}) {
- return true
- }
-
- return false
-}
-
-// StringMapHasStringKey returns true if any of the supplied keys are present in the map.
-func StringMapHasStringKey(m map[string]string, keys ...string) bool {
- for _, k := range keys {
- if _, ok := m[k]; ok {
- return true
- }
- }
-
- return false
-}
-
-func IsUnixDev(path string) bool {
- stat, err := os.Stat(path)
- if err != nil {
- return false
-
- }
-
- if (stat.Mode() & os.ModeDevice) == 0 {
- return false
- }
-
- return true
-}
-
-func IsBlockdev(fm os.FileMode) bool {
- return ((fm&os.ModeDevice != 0) && (fm&os.ModeCharDevice == 0))
-}
-
-func IsBlockdevPath(pathName string) bool {
- sb, err := os.Stat(pathName)
- if err != nil {
- return false
- }
-
- fm := sb.Mode()
- return ((fm&os.ModeDevice != 0) && (fm&os.ModeCharDevice == 0))
-}
-
-// DeepCopy copies src to dest by using encoding/gob, so it's not that fast.
-func DeepCopy(src, dest interface{}) error {
- buff := new(bytes.Buffer)
- enc := gob.NewEncoder(buff)
- dec := gob.NewDecoder(buff)
- if err := enc.Encode(src); err != nil {
- return err
- }
-
- if err := dec.Decode(dest); err != nil {
- return err
- }
-
- return nil
-}
-
-func RunningInUserNS() bool {
- file, err := os.Open("/proc/self/uid_map")
- if err != nil {
- return false
- }
- defer file.Close()
-
- buf := bufio.NewReader(file)
- l, _, err := buf.ReadLine()
- if err != nil {
- return false
- }
-
- line := string(l)
- var a, b, c int64
- fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
- if a == 0 && b == 0 && c == 4294967295 {
- return false
- }
- return true
-}
-
-func ValidHostname(name string) bool {
- // Validate length
- if len(name) < 1 || len(name) > 63 {
- return false
- }
-
- // Validate first character
- if strings.HasPrefix(name, "-") {
- return false
- }
-
- if _, err := strconv.Atoi(string(name[0])); err == nil {
- return false
- }
-
- // Validate last character
- if strings.HasSuffix(name, "-") {
- return false
- }
-
- // Validate the character set
- match, _ := regexp.MatchString("^[-a-zA-Z0-9]*$", name)
- if !match {
- return false
- }
-
- return true
-}
-
-// Spawn the editor with a temporary YAML file for editing configs
-func TextEditor(inPath string, inContent []byte) ([]byte, error) {
- var f *os.File
- var err error
- var path string
-
- // Detect the text editor to use
- editor := os.Getenv("VISUAL")
- if editor == "" {
- editor = os.Getenv("EDITOR")
- if editor == "" {
- for _, p := range []string{"editor", "vi", "emacs", "nano"} {
- _, err := exec.LookPath(p)
- if err == nil {
- editor = p
- break
- }
- }
- if editor == "" {
- return []byte{}, fmt.Errorf("No text editor found, please set the EDITOR environment variable")
- }
- }
- }
-
- if inPath == "" {
-		// No path was provided; create a temporary file for the content
- f, err = ioutil.TempFile("", "lxd_editor_")
- if err != nil {
- return []byte{}, err
- }
-
- err = os.Chmod(f.Name(), 0600)
- if err != nil {
- f.Close()
- os.Remove(f.Name())
- return []byte{}, err
- }
-
- f.Write(inContent)
- f.Close()
-
- path = fmt.Sprintf("%s.yaml", f.Name())
- os.Rename(f.Name(), path)
- defer os.Remove(path)
- } else {
- path = inPath
- }
-
- cmdParts := strings.Fields(editor)
- cmd := exec.Command(cmdParts[0], append(cmdParts[1:], path)...)
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- err = cmd.Run()
- if err != nil {
- return []byte{}, err
- }
-
- content, err := ioutil.ReadFile(path)
- if err != nil {
- return []byte{}, err
- }
-
- return content, nil
-}
-
-func ParseMetadata(metadata interface{}) (map[string]interface{}, error) {
- newMetadata := make(map[string]interface{})
- s := reflect.ValueOf(metadata)
- if !s.IsValid() {
- return nil, nil
- }
-
- if s.Kind() == reflect.Map {
- for _, k := range s.MapKeys() {
- if k.Kind() != reflect.String {
- return nil, fmt.Errorf("Invalid metadata provided (key isn't a string)")
- }
- newMetadata[k.String()] = s.MapIndex(k).Interface()
- }
- } else if s.Kind() == reflect.Ptr && !s.Elem().IsValid() {
- return nil, nil
- } else {
- return nil, fmt.Errorf("Invalid metadata provided (type isn't a map)")
- }
-
- return newMetadata, nil
-}
-
-// RemoveDuplicatesFromString removes all duplicates of the string 'sep'
-// from the specified string 's'. Leading and trailing occurrences of sep
-// are NOT removed (duplicate leading/trailing are). Performs poorly if
-// there are multiple consecutive redundant separators.
-func RemoveDuplicatesFromString(s string, sep string) string {
- dup := sep + sep
- for s = strings.Replace(s, dup, sep, -1); strings.Contains(s, dup); s = strings.Replace(s, dup, sep, -1) {
-
- }
- return s
-}
-
-type RunError struct {
- msg string
- Err error
- Stdout string
- Stderr string
-}
-
-func (e RunError) Error() string {
- return e.msg
-}
-
-// RunCommandSplit runs a command with a supplied environment and optional arguments and returns the
-// resulting stdout and stderr output as separate variables. If the supplied environment is nil then
-// the default environment is used. If the command fails to start or returns a non-zero exit code
-// then an error is returned containing the output of stderr too.
-func RunCommandSplit(env []string, name string, arg ...string) (string, string, error) {
- cmd := exec.Command(name, arg...)
-
- if env != nil {
- cmd.Env = env
- }
-
- var stdout bytes.Buffer
- var stderr bytes.Buffer
- cmd.Stdout = &stdout
- cmd.Stderr = &stderr
-
- err := cmd.Run()
- if err != nil {
- err := RunError{
- msg: fmt.Sprintf("Failed to run: %s %s: %s", name, strings.Join(arg, " "), strings.TrimSpace(string(stderr.Bytes()))),
- Stdout: string(stdout.Bytes()),
- Stderr: string(stderr.Bytes()),
- Err: err,
- }
- return string(stdout.Bytes()), string(stderr.Bytes()), err
- }
-
- return string(stdout.Bytes()), string(stderr.Bytes()), nil
-}
-
-// RunCommand runs a command with optional arguments and returns stdout. If the command fails to
-// start or returns a non-zero exit code then an error is returned containing the output of stderr.
-func RunCommand(name string, arg ...string) (string, error) {
- stdout, _, err := RunCommandSplit(nil, name, arg...)
- return stdout, err
-}
-
-// RunCommandCLocale runs a command with a LANG=C.UTF-8 environment set with optional arguments and
-// returns stdout. If the command fails to start or returns a non-zero exit code then an error is
-// returned containing the output of stderr.
-func RunCommandCLocale(name string, arg ...string) (string, error) {
- stdout, _, err := RunCommandSplit(append(os.Environ(), "LANG=C.UTF-8"), name, arg...)
- return stdout, err
-}
-
-func RunCommandWithFds(stdin io.Reader, stdout io.Writer, name string, arg ...string) error {
- cmd := exec.Command(name, arg...)
-
- if stdin != nil {
- cmd.Stdin = stdin
- }
-
- if stdout != nil {
- cmd.Stdout = stdout
- }
-
- var buffer bytes.Buffer
- cmd.Stderr = &buffer
-
- err := cmd.Run()
- if err != nil {
- err := RunError{
- msg: fmt.Sprintf("Failed to run: %s %s: %s", name, strings.Join(arg, " "),
- strings.TrimSpace(buffer.String())),
- Err: err,
- }
-
- return err
- }
-
- return nil
-}
-
-func TryRunCommand(name string, arg ...string) (string, error) {
- var err error
- var output string
-
- for i := 0; i < 20; i++ {
- output, err = RunCommand(name, arg...)
- if err == nil {
- break
- }
-
- time.Sleep(500 * time.Millisecond)
- }
-
- return output, err
-}
-
-func TimeIsSet(ts time.Time) bool {
- if ts.Unix() <= 0 {
- return false
- }
-
- if ts.UTC().Unix() <= 0 {
- return false
- }
-
- return true
-}
-
-// WriteTempFile creates a temp file with the specified content
-func WriteTempFile(dir string, prefix string, content string) (string, error) {
- f, err := ioutil.TempFile(dir, prefix)
- if err != nil {
- return "", err
- }
- defer f.Close()
-
- _, err = f.WriteString(content)
- return f.Name(), err
-}
-
-// EscapePathFstab escapes a path fstab-style.
-// This ensures that getmntent_r() and friends can correctly parse stuff like
-// /some/wacky path with spaces /some/wacky target with spaces
-func EscapePathFstab(path string) string {
- r := strings.NewReplacer(
- " ", "\\040",
- "\t", "\\011",
- "\n", "\\012",
- "\\", "\\\\")
- return r.Replace(path)
-}
-
-func SetProgressMetadata(metadata map[string]interface{}, stage, displayPrefix string, percent, processed, speed int64) {
- progress := make(map[string]string)
- // stage, percent, speed sent for API callers.
- progress["stage"] = stage
- if processed > 0 {
- progress["processed"] = strconv.FormatInt(processed, 10)
- }
-
- if percent > 0 {
- progress["percent"] = strconv.FormatInt(percent, 10)
- }
-
- progress["speed"] = strconv.FormatInt(speed, 10)
- metadata["progress"] = progress
-
- // _progress with formatted text sent for lxc cli.
- if percent > 0 {
- metadata[stage+"_progress"] = fmt.Sprintf("%s: %d%% (%s/s)", displayPrefix, percent, units.GetByteSizeString(speed, 2))
- } else if processed > 0 {
- metadata[stage+"_progress"] = fmt.Sprintf("%s: %s (%s/s)", displayPrefix, units.GetByteSizeString(processed, 2), units.GetByteSizeString(speed, 2))
- } else {
- metadata[stage+"_progress"] = fmt.Sprintf("%s: %s/s", displayPrefix, units.GetByteSizeString(speed, 2))
- }
-}
-
-func DownloadFileHash(httpClient *http.Client, useragent string, progress func(progress ioprogress.ProgressData), canceler *cancel.Canceler, filename string, url string, hash string, hashFunc hash.Hash, target io.WriteSeeker) (int64, error) {
- // Always seek to the beginning
- target.Seek(0, 0)
-
- // Prepare the download request
- req, err := http.NewRequest("GET", url, nil)
- if err != nil {
- return -1, err
- }
-
- if useragent != "" {
- req.Header.Set("User-Agent", useragent)
- }
-
- // Perform the request
- r, doneCh, err := cancel.CancelableDownload(canceler, httpClient, req)
- if err != nil {
- return -1, err
- }
- defer r.Body.Close()
- defer close(doneCh)
-
- if r.StatusCode != http.StatusOK {
- return -1, fmt.Errorf("Unable to fetch %s: %s", url, r.Status)
- }
-
- // Handle the data
- body := r.Body
- if progress != nil {
- body = &ioprogress.ProgressReader{
- ReadCloser: r.Body,
- Tracker: &ioprogress.ProgressTracker{
- Length: r.ContentLength,
- Handler: func(percent int64, speed int64) {
- if filename != "" {
- progress(ioprogress.ProgressData{Text: fmt.Sprintf("%s: %d%% (%s/s)", filename, percent, units.GetByteSizeString(speed, 2))})
- } else {
- progress(ioprogress.ProgressData{Text: fmt.Sprintf("%d%% (%s/s)", percent, units.GetByteSizeString(speed, 2))})
- }
- },
- },
- }
- }
-
- var size int64
-
- if hashFunc != nil {
- size, err = io.Copy(io.MultiWriter(target, hashFunc), body)
- if err != nil {
- return -1, err
- }
-
- result := fmt.Sprintf("%x", hashFunc.Sum(nil))
- if result != hash {
- return -1, fmt.Errorf("Hash mismatch for %s: %s != %s", url, result, hash)
- }
- } else {
- size, err = io.Copy(target, body)
- if err != nil {
- return -1, err
- }
- }
-
- return size, nil
-}
-
-func ParseNumberFromFile(file string) (int64, error) {
- f, err := os.Open(file)
- if err != nil {
- return int64(0), err
- }
- defer f.Close()
-
- buf := make([]byte, 4096)
- n, err := f.Read(buf)
- if err != nil {
- return int64(0), err
- }
-
- str := strings.TrimSpace(string(buf[0:n]))
- nr, err := strconv.Atoi(str)
- if err != nil {
- return int64(0), err
- }
-
- return int64(nr), nil
-}
-
-type ReadSeeker struct {
- io.Reader
- io.Seeker
-}
-
-func NewReadSeeker(reader io.Reader, seeker io.Seeker) *ReadSeeker {
- return &ReadSeeker{Reader: reader, Seeker: seeker}
-}
-
-func (r *ReadSeeker) Read(p []byte) (n int, err error) {
- return r.Reader.Read(p)
-}
-
-func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) {
- return r.Seeker.Seek(offset, whence)
-}
-
-// RenderTemplate renders a pongo2 template.
-func RenderTemplate(template string, ctx pongo2.Context) (string, error) {
- // Load template from string
- tpl, err := pongo2.FromString("{% autoescape off %}" + template + "{% endautoescape %}")
- if err != nil {
- return "", err
- }
-
- // Get rendered template
- ret, err := tpl.Execute(ctx)
- if err != nil {
- return ret, err
- }
-
- // Looks like we're nesting templates so run pongo again
- if strings.Contains(ret, "{{") || strings.Contains(ret, "{%") {
- return RenderTemplate(ret, ctx)
- }
-
- return ret, err
-}
-
-func GetSnapshotExpiry(refDate time.Time, s string) (time.Time, error) {
- expr := strings.TrimSpace(s)
-
- if expr == "" {
- return time.Time{}, nil
- }
-
- re := regexp.MustCompile(`^(\d+)(M|H|d|w|m|y)$`)
- expiry := map[string]int{
- "M": 0,
- "H": 0,
- "d": 0,
- "w": 0,
- "m": 0,
- "y": 0,
- }
-
- values := strings.Split(expr, " ")
-
- if len(values) == 0 {
- return time.Time{}, nil
- }
-
- for _, value := range values {
- fields := re.FindStringSubmatch(value)
- if fields == nil {
- return time.Time{}, fmt.Errorf("Invalid expiry expression")
- }
-
- if expiry[fields[2]] > 0 {
- // We don't allow fields to be set multiple times
- return time.Time{}, fmt.Errorf("Invalid expiry expression")
- }
-
- val, err := strconv.Atoi(fields[1])
- if err != nil {
- return time.Time{}, err
- }
-
- expiry[fields[2]] = val
-
- }
-
- t := refDate.AddDate(expiry["y"], expiry["m"], expiry["d"]+expiry["w"]*7).Add(
- time.Hour*time.Duration(expiry["H"]) + time.Minute*time.Duration(expiry["M"]))
-
- return t, nil
-}
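GetSnapshotExpiry folds the parsed fields into a single calculation: years, months, days, and weeks (as 7 days) go through AddDate, while hours and minutes become a Duration. The arithmetic for a fixed expression, hard-coded here for illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ref := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)

	// Equivalent of the removed GetSnapshotExpiry(ref, "1y 2m 5d 3H 30M"):
	// y/m/d (plus weeks*7) go through AddDate, H and M through a Duration.
	t := ref.AddDate(1, 2, 5).Add(3*time.Hour + 30*time.Minute)
	fmt.Println(t) // 2021-03-06 03:30:00 +0000 UTC
}
```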
diff --git a/vendor/github.com/lxc/lxd/shared/util_linux.go b/vendor/github.com/lxc/lxd/shared/util_linux.go
deleted file mode 100644
index f2b882741695..000000000000
--- a/vendor/github.com/lxc/lxd/shared/util_linux.go
+++ /dev/null
@@ -1,378 +0,0 @@
-// +build linux
-
-package shared
-
-import (
- "bufio"
- "fmt"
- "os"
- "path/filepath"
- "reflect"
- "strings"
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/unix"
-
- "github.com/lxc/lxd/shared/units"
-)
-
-// --- pure Go functions ---
-
-func GetFileStat(p string) (uid int, gid int, major uint32, minor uint32, inode uint64, nlink int, err error) {
- var stat unix.Stat_t
- err = unix.Lstat(p, &stat)
- if err != nil {
- return
- }
- uid = int(stat.Uid)
- gid = int(stat.Gid)
- inode = uint64(stat.Ino)
- nlink = int(stat.Nlink)
- if stat.Mode&unix.S_IFBLK != 0 || stat.Mode&unix.S_IFCHR != 0 {
- major = unix.Major(stat.Rdev)
- minor = unix.Minor(stat.Rdev)
- }
-
- return
-}
-
-// GetPathMode returns an os.FileMode for the provided path
-func GetPathMode(path string) (os.FileMode, error) {
- fi, err := os.Stat(path)
- if err != nil {
- return os.FileMode(0000), err
- }
-
- mode, _, _ := GetOwnerMode(fi)
- return mode, nil
-}
-
-func parseMountinfo(name string) int {
- // In case someone uses symlinks we need to look for the actual
- // mountpoint.
- actualPath, err := filepath.EvalSymlinks(name)
- if err != nil {
- return -1
- }
-
- f, err := os.Open("/proc/self/mountinfo")
- if err != nil {
- return -1
- }
- defer f.Close()
-
- scanner := bufio.NewScanner(f)
- for scanner.Scan() {
- line := scanner.Text()
- tokens := strings.Fields(line)
- if len(tokens) < 5 {
- return -1
- }
- cleanPath := filepath.Clean(tokens[4])
- if cleanPath == actualPath {
- return 1
- }
- }
-
- return 0
-}
-
-func IsMountPoint(name string) bool {
- ret := parseMountinfo(name)
- if ret == 1 {
- return true
- }
-
- stat, err := os.Stat(name)
- if err != nil {
- return false
- }
-
- rootStat, err := os.Lstat(name + "/..")
- if err != nil {
- return false
- }
- // If the directory has the same device as parent, then it's not a mountpoint.
- return stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev
-}
-
-func SetSize(fd int, width int, height int) (err error) {
- var dimensions [4]uint16
- dimensions[0] = uint16(height)
- dimensions[1] = uint16(width)
-
- if _, _, err := unix.Syscall6(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.TIOCSWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
- return err
- }
- return nil
-}
-
-// This uses ssize_t llistxattr(const char *path, char *list, size_t size); to
-// handle symbolic links (should it in the future become possible to set
-// extended attributes on symlinks): if path is a symbolic link, the extended
-// attributes associated with the link itself are retrieved.
-func llistxattr(path string, list []byte) (sz int, err error) {
- var _p0 *byte
- _p0, err = unix.BytePtrFromString(path)
- if err != nil {
- return
- }
- var _p1 unsafe.Pointer
- if len(list) > 0 {
- _p1 = unsafe.Pointer(&list[0])
- } else {
- _p1 = unsafe.Pointer(nil)
- }
- r0, _, e1 := unix.Syscall(unix.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(list)))
- sz = int(r0)
- if e1 != 0 {
- err = e1
- }
- return
-}
-
-// GetAllXattr retrieves all extended attributes associated with a file,
-// directory or symbolic link.
-func GetAllXattr(path string) (xattrs map[string]string, err error) {
- e1 := fmt.Errorf("Extended attributes changed during retrieval")
-
- // Call llistxattr() twice: First, to determine the size of the buffer
- // we need to allocate to store the extended attributes, second, to
- // actually store the extended attributes in the buffer. Also, check if
- // the size/number of extended attributes hasn't changed between the two
- // calls.
- pre, err := llistxattr(path, nil)
- if err != nil || pre < 0 {
- return nil, err
- }
- if pre == 0 {
- return nil, nil
- }
-
- dest := make([]byte, pre)
-
- post, err := llistxattr(path, dest)
- if err != nil || post < 0 {
- return nil, err
- }
- if post != pre {
- return nil, e1
- }
-
- split := strings.Split(string(dest), "\x00")
- if split == nil {
- return nil, fmt.Errorf("No valid extended attribute key found")
- }
- // *listxattr functions return a list of names as an unordered array
- // of null-terminated character strings (attribute names are separated
- // by null bytes ('\0')), like this: user.name1\0system.name1\0user.name2\0
- // Since we split at the '\0'-byte the last element of the slice will be
- // the empty string. We remove it:
- if split[len(split)-1] == "" {
- split = split[:len(split)-1]
- }
-
- xattrs = make(map[string]string, len(split))
-
- for _, x := range split {
- xattr := string(x)
- // Call Getxattr() twice: First, to determine the size of the
- // buffer we need to allocate to store the extended attributes,
- // second, to actually store the extended attributes in the
- // buffer. Also, check if the size of the extended attribute
- // hasn't changed between the two calls.
- pre, err = unix.Getxattr(path, xattr, nil)
- if err != nil || pre < 0 {
- return nil, err
- }
-
- dest = make([]byte, pre)
- post := 0
- if pre > 0 {
- post, err = unix.Getxattr(path, xattr, dest)
- if err != nil || post < 0 {
- return nil, err
- }
- }
-
- if post != pre {
- return nil, e1
- }
-
- xattrs[xattr] = string(dest)
- }
-
- return xattrs, nil
-}
-
-var ObjectFound = fmt.Errorf("Found requested object")
-
-func LookupUUIDByBlockDevPath(diskDevice string) (string, error) {
- uuid := ""
- readUUID := func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
-
- if (info.Mode() & os.ModeSymlink) == os.ModeSymlink {
- link, err := os.Readlink(path)
- if err != nil {
- return err
- }
-
- // filepath.Join() will call Clean() on the result and
- // thus resolve those ugly "../../" parts that make it
- // hard to compare the strings.
- absPath := filepath.Join("/dev/disk/by-uuid", link)
- if absPath == diskDevice {
- uuid = path
-				// This allows us to avoid needlessly traversing
-				// the whole directory.
- return ObjectFound
- }
- }
- return nil
- }
-
- err := filepath.Walk("/dev/disk/by-uuid", readUUID)
- if err != nil && err != ObjectFound {
- return "", fmt.Errorf("Failed to detect UUID: %s", err)
- }
-
- if uuid == "" {
- return "", fmt.Errorf("Failed to detect UUID")
- }
-
- lastSlash := strings.LastIndex(uuid, "/")
- return uuid[lastSlash+1:], nil
-}
-
-// Detect whether err is an errno.
-func GetErrno(err error) (errno error, iserrno bool) {
- sysErr, ok := err.(*os.SyscallError)
- if ok {
- return sysErr.Err, true
- }
-
- pathErr, ok := err.(*os.PathError)
- if ok {
- return pathErr.Err, true
- }
-
- tmpErrno, ok := err.(unix.Errno)
- if ok {
- return tmpErrno, true
- }
-
- return nil, false
-}
-
-// Utsname returns the same info as unix.Utsname, as strings
-type Utsname struct {
- Sysname string
- Nodename string
- Release string
- Version string
- Machine string
- Domainname string
-}
-
-// Uname returns Utsname as strings
-func Uname() (*Utsname, error) {
- /*
- * Based on: https://groups.google.com/forum/#!topic/golang-nuts/Jel8Bb-YwX8
- * there is really no better way to do this, which is
- * unfortunate. Also, we ditch the more accepted CharsToString
- * version in that thread, since it doesn't seem as portable,
- * viz. github issue #206.
- */
-
- uname := unix.Utsname{}
- err := unix.Uname(&uname)
- if err != nil {
- return nil, err
- }
-
- return &Utsname{
- Sysname: intArrayToString(uname.Sysname),
- Nodename: intArrayToString(uname.Nodename),
- Release: intArrayToString(uname.Release),
- Version: intArrayToString(uname.Version),
- Machine: intArrayToString(uname.Machine),
- Domainname: intArrayToString(uname.Domainname),
- }, nil
-}
-
-func intArrayToString(arr interface{}) string {
- slice := reflect.ValueOf(arr)
- s := ""
- for i := 0; i < slice.Len(); i++ {
- val := slice.Index(i)
- valInt := int64(-1)
-
- switch val.Kind() {
-		case reflect.Int, reflect.Int8:
-			valInt = int64(val.Int())
-		case reflect.Uint, reflect.Uint8:
-			valInt = int64(val.Uint())
- default:
- continue
- }
-
- if valInt == 0 {
- break
- }
-
- s += string(byte(valInt))
- }
-
- return s
-}
-
-func Statvfs(path string) (*unix.Statfs_t, error) {
- var st unix.Statfs_t
-
- err := unix.Statfs(path, &st)
- if err != nil {
- return nil, err
- }
-
- return &st, nil
-}
-
-func DeviceTotalMemory() (int64, error) {
- // Open /proc/meminfo
- f, err := os.Open("/proc/meminfo")
- if err != nil {
- return -1, err
- }
- defer f.Close()
-
- // Read it line by line
- scan := bufio.NewScanner(f)
- for scan.Scan() {
- line := scan.Text()
-
- // We only care about MemTotal
- if !strings.HasPrefix(line, "MemTotal:") {
- continue
- }
-
- // Extract the before last (value) and last (unit) fields
- fields := strings.Split(line, " ")
- value := fields[len(fields)-2] + fields[len(fields)-1]
-
- // Feed the result to units.ParseByteSizeString to get an int value
- valueBytes, err := units.ParseByteSizeString(value)
- if err != nil {
- return -1, err
- }
-
- return valueBytes, nil
- }
-
- return -1, fmt.Errorf("Couldn't find MemTotal")
-}
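
`GetAllXattr` above documents the classic size-then-fill pattern for the *listxattr/*getxattr calls. The same pattern can be sketched with the `Llistxattr` wrapper that `golang.org/x/sys/unix` exposes (the vendored code predates that wrapper and issued the raw syscall itself); Linux-only and illustrative:

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/sys/unix"
)

// listXattrNames queries the required buffer size first, then fills the
// buffer. A size change between the two calls surfaces as a short or failed
// read, mirroring the consistency check in GetAllXattr above.
func listXattrNames(path string) ([]string, error) {
	sz, err := unix.Llistxattr(path, nil) // first call: size only
	if err != nil || sz == 0 {
		return nil, err
	}
	buf := make([]byte, sz)
	n, err := unix.Llistxattr(path, buf) // second call: fill the buffer
	if err != nil {
		return nil, err
	}
	// Names are NUL-separated; skip the trailing empty element.
	var names []string
	for _, name := range bytes.Split(buf[:n], []byte{0}) {
		if len(name) > 0 {
			names = append(names, string(name))
		}
	}
	return names, nil
}

func main() {
	names, err := listXattrNames("/tmp")
	fmt.Println(names, err)
}
```
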
diff --git a/vendor/github.com/lxc/lxd/shared/util_linux_cgo.go b/vendor/github.com/lxc/lxd/shared/util_linux_cgo.go
deleted file mode 100644
index bcf7e4f1cbb1..000000000000
--- a/vendor/github.com/lxc/lxd/shared/util_linux_cgo.go
+++ /dev/null
@@ -1,461 +0,0 @@
-// +build linux
-// +build cgo
-
-package shared
-
-import (
- "errors"
- "fmt"
- "io"
- "os"
- "sync"
- "sync/atomic"
- "unsafe"
-
- "golang.org/x/sys/unix"
-
- "github.com/lxc/lxd/shared/logger"
-)
-
-/*
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE 1
-#endif
-#include <errno.h>
-#include <fcntl.h>
-#include <grp.h>
-#include <limits.h>
-#include <poll.h>
-#include <pty.h>
-#include <pwd.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/ioctl.h>
-#include <sys/types.h>
-#include <sys/un.h>
-#include <termios.h>
-#include <unistd.h>
-
-#define ABSTRACT_UNIX_SOCK_LEN sizeof(((struct sockaddr_un *)0)->sun_path)
-
-// This is an adaptation from https://codereview.appspot.com/4589049, to be
-// included in the stdlib with the stdlib's license.
-
-void configure_pty(int fd) {
- struct termios term_settings;
- struct winsize win;
-
- if (tcgetattr(fd, &term_settings) < 0) {
- fprintf(stderr, "Failed to get settings: %s\n", strerror(errno));
- return;
- }
-
- term_settings.c_iflag |= IMAXBEL;
- term_settings.c_iflag |= IUTF8;
- term_settings.c_iflag |= BRKINT;
- term_settings.c_iflag |= IXANY;
-
- term_settings.c_cflag |= HUPCL;
-
- if (tcsetattr(fd, TCSANOW, &term_settings) < 0) {
- fprintf(stderr, "Failed to set settings: %s\n", strerror(errno));
- return;
- }
-
- if (ioctl(fd, TIOCGWINSZ, &win) < 0) {
- fprintf(stderr, "Failed to get the terminal size: %s\n", strerror(errno));
- return;
- }
-
- win.ws_col = 80;
- win.ws_row = 25;
-
- if (ioctl(fd, TIOCSWINSZ, &win) < 0) {
- fprintf(stderr, "Failed to set the terminal size: %s\n", strerror(errno));
- return;
- }
-
- if (fcntl(fd, F_SETFD, FD_CLOEXEC) < 0) {
- fprintf(stderr, "Failed to set FD_CLOEXEC: %s\n", strerror(errno));
- return;
- }
-
- return;
-}
-
-void create_pty(int *master, int *slave, uid_t uid, gid_t gid) {
- if (openpty(master, slave, NULL, NULL, NULL) < 0) {
- fprintf(stderr, "Failed to openpty: %s\n", strerror(errno));
- return;
- }
-
- configure_pty(*master);
- configure_pty(*slave);
-
- if (fchown(*slave, uid, gid) < 0) {
- fprintf(stderr, "Warning: error chowning pty to container root\n");
- fprintf(stderr, "Continuing...\n");
- }
-}
-
-void create_pipe(int *master, int *slave) {
- int pipefd[2];
-
- if (pipe2(pipefd, O_CLOEXEC) < 0) {
- fprintf(stderr, "Failed to create a pipe: %s\n", strerror(errno));
- return;
- }
-
- *master = pipefd[0];
- *slave = pipefd[1];
-}
-
-int get_poll_revents(int lfd, int timeout, int flags, int *revents, int *saved_errno)
-{
- int ret;
- struct pollfd pfd = {lfd, flags, 0};
-
-again:
- ret = poll(&pfd, 1, timeout);
- if (ret < 0) {
- if (errno == EINTR)
- goto again;
-
- *saved_errno = errno;
- fprintf(stderr, "Failed to poll() on file descriptor.\n");
- return -1;
- }
-
- *revents = pfd.revents;
-
- return ret;
-}
-*/
-import "C"
-
-const ABSTRACT_UNIX_SOCK_LEN int = C.ABSTRACT_UNIX_SOCK_LEN
-
-const POLLIN int = C.POLLIN
-const POLLPRI int = C.POLLPRI
-const POLLNVAL int = C.POLLNVAL
-const POLLERR int = C.POLLERR
-const POLLHUP int = C.POLLHUP
-const POLLRDHUP int = C.POLLRDHUP
-
-func GetPollRevents(fd int, timeout int, flags int) (int, int, error) {
- var err error
- revents := C.int(0)
- saved_errno := C.int(0)
-
- ret := C.get_poll_revents(C.int(fd), C.int(timeout), C.int(flags), &revents, &saved_errno)
- if int(ret) < 0 {
- err = unix.Errno(saved_errno)
- }
-
- return int(ret), int(revents), err
-}
-
-func OpenPty(uid, gid int64) (master *os.File, slave *os.File, err error) {
- fd_master := C.int(-1)
- fd_slave := C.int(-1)
- rootUid := C.uid_t(uid)
- rootGid := C.gid_t(gid)
-
- C.create_pty(&fd_master, &fd_slave, rootUid, rootGid)
-
- if fd_master == -1 || fd_slave == -1 {
- return nil, nil, errors.New("Failed to create a new pts pair")
- }
-
- master = os.NewFile(uintptr(fd_master), "master")
- slave = os.NewFile(uintptr(fd_slave), "slave")
-
- return master, slave, nil
-}
-
-func Pipe() (master *os.File, slave *os.File, err error) {
- fd_master := C.int(-1)
- fd_slave := C.int(-1)
-
- C.create_pipe(&fd_master, &fd_slave)
-
- if fd_master == -1 || fd_slave == -1 {
- return nil, nil, errors.New("Failed to create a new pipe")
- }
-
- master = os.NewFile(uintptr(fd_master), "master")
- slave = os.NewFile(uintptr(fd_slave), "slave")
-
- return master, slave, nil
-}
-
-// UserId is an adaptation from https://codereview.appspot.com/4589049.
-func UserId(name string) (int, error) {
- var pw C.struct_passwd
- var result *C.struct_passwd
-
- bufSize := C.sysconf(C._SC_GETPW_R_SIZE_MAX)
- if bufSize < 0 {
- bufSize = 4096
- }
-
- buf := C.malloc(C.size_t(bufSize))
- if buf == nil {
- return -1, fmt.Errorf("allocation failed")
- }
- defer C.free(buf)
-
- cname := C.CString(name)
- defer C.free(unsafe.Pointer(cname))
-
-again:
- rv, errno := C.getpwnam_r(cname,
- &pw,
- (*C.char)(buf),
- C.size_t(bufSize),
- &result)
- if rv < 0 {
- // OOM killer will take care of us if we end up doing this too
- // often.
- if errno == unix.ERANGE {
- bufSize *= 2
- tmp := C.realloc(buf, C.size_t(bufSize))
- if tmp == nil {
- return -1, fmt.Errorf("allocation failed")
- }
- buf = tmp
- goto again
- }
- return -1, fmt.Errorf("failed user lookup: %s", unix.Errno(rv))
- }
-
- if result == nil {
- return -1, fmt.Errorf("unknown user %s", name)
- }
-
- return int(C.int(result.pw_uid)), nil
-}
-
-// GroupId is an adaptation from https://codereview.appspot.com/4589049.
-func GroupId(name string) (int, error) {
- var grp C.struct_group
- var result *C.struct_group
-
- bufSize := C.sysconf(C._SC_GETGR_R_SIZE_MAX)
- if bufSize < 0 {
- bufSize = 4096
- }
-
- buf := C.malloc(C.size_t(bufSize))
- if buf == nil {
- return -1, fmt.Errorf("allocation failed")
- }
-
- cname := C.CString(name)
- defer C.free(unsafe.Pointer(cname))
-
-again:
- rv, errno := C.getgrnam_r(cname,
- &grp,
- (*C.char)(buf),
- C.size_t(bufSize),
- &result)
- if rv != 0 {
- // OOM killer will take care of us if we end up doing this too
- // often.
- if errno == unix.ERANGE {
- bufSize *= 2
- tmp := C.realloc(buf, C.size_t(bufSize))
- if tmp == nil {
- return -1, fmt.Errorf("allocation failed")
- }
- buf = tmp
- goto again
- }
-
- C.free(buf)
- return -1, fmt.Errorf("failed group lookup: %s", unix.Errno(rv))
- }
- C.free(buf)
-
- if result == nil {
- return -1, fmt.Errorf("unknown group %s", name)
- }
-
- return int(C.int(result.gr_gid)), nil
-}
-
-// Extensively commented directly in the code. Please leave the comments!
-// Looking at this in a couple of months, no one will know why and how this
-// works anymore.
-func ExecReaderToChannel(r io.Reader, bufferSize int, exited <-chan bool, fd int) <-chan []byte {
- if bufferSize <= (128 * 1024) {
- bufferSize = (128 * 1024)
- }
-
- ch := make(chan ([]byte))
-
-	// Takes care that the closeChannel() function is executed exactly once.
-	// This allows us to avoid using a mutex.
- var once sync.Once
- closeChannel := func() {
- close(ch)
- }
-
- // [1]: This function has just one job: Dealing with the case where we
- // are running an interactive shell session where we put a process in
- // the background that does hold stdin/stdout open, but does not
- // generate any output at all. This case cannot be dealt with in the
- // following function call. Here's why: Assume the above case, now the
- // attached child (the shell in this example) exits. This will not
- // generate any poll() event: We won't get POLLHUP because the
-	// background process is holding stdin/stdout open and no one is writing
-	// to it. So we effectively block on GetPollRevents() in the function
-	// below. Hence, we use another go routine here whose only job is to
- // handle that case: When we detect that the child has exited we check
- // whether a POLLIN or POLLHUP event has been generated. If not, we know
- // that there's nothing buffered on stdout and exit.
- var attachedChildIsDead int32 = 0
- go func() {
- <-exited
-
- atomic.StoreInt32(&attachedChildIsDead, 1)
-
- ret, revents, err := GetPollRevents(fd, 0, (POLLIN | POLLPRI | POLLERR | POLLHUP | POLLRDHUP | POLLNVAL))
- if ret < 0 {
- logger.Errorf("Failed to poll(POLLIN | POLLPRI | POLLHUP | POLLRDHUP) on file descriptor: %s.", err)
- } else if ret > 0 {
- if (revents & POLLERR) > 0 {
- logger.Warnf("Detected poll(POLLERR) event.")
- } else if (revents & POLLNVAL) > 0 {
- logger.Warnf("Detected poll(POLLNVAL) event.")
- }
- } else if ret == 0 {
- logger.Debugf("No data in stdout: exiting.")
- once.Do(closeChannel)
- return
- }
- }()
-
- go func() {
- readSize := (128 * 1024)
- offset := 0
- buf := make([]byte, bufferSize)
- avoidAtomicLoad := false
-
- defer once.Do(closeChannel)
- for {
- nr := 0
- var err error
-
- ret, revents, err := GetPollRevents(fd, -1, (POLLIN | POLLPRI | POLLERR | POLLHUP | POLLRDHUP | POLLNVAL))
- if ret < 0 {
- // This condition is only reached in cases where we are massively f*cked since we even handle
- // EINTR in the underlying C wrapper around poll(). So let's exit here.
- logger.Errorf("Failed to poll(POLLIN | POLLPRI | POLLERR | POLLHUP | POLLRDHUP) on file descriptor: %s. Exiting.", err)
- return
- }
-
- // [2]: If the process exits before all its data has been read by us and no other process holds stdin or
- // stdout open, then we will observe a (POLLHUP | POLLRDHUP | POLLIN) event. This means, we need to
- // keep on reading from the pty file descriptor until we get a simple POLLHUP back.
- both := ((revents & (POLLIN | POLLPRI)) > 0) && ((revents & (POLLHUP | POLLRDHUP)) > 0)
- if both {
- logger.Debugf("Detected poll(POLLIN | POLLPRI | POLLHUP | POLLRDHUP) event.")
- read := buf[offset : offset+readSize]
- nr, err = r.Read(read)
- }
-
- if (revents & POLLERR) > 0 {
- logger.Warnf("Detected poll(POLLERR) event: exiting.")
- return
- } else if (revents & POLLNVAL) > 0 {
- logger.Warnf("Detected poll(POLLNVAL) event: exiting.")
- return
- }
-
- if ((revents & (POLLIN | POLLPRI)) > 0) && !both {
- // This might appear unintuitive at first but is actually a nice trick: Assume we are running
- // a shell session in a container and put a process in the background that is writing to
- // stdout. Now assume the attached process (aka the shell in this example) exits because we
- // used Ctrl+D to send EOF or something. If no other process would be holding stdout open we
- // would expect to observe either a (POLLHUP | POLLRDHUP | POLLIN | POLLPRI) event if there
- // is still data buffered from the previous process or a simple (POLLHUP | POLLRDHUP) if
- // no data is buffered. The fact that we only observe a (POLLIN | POLLPRI) event means that
- // another process is holding stdout open and is writing to it.
- // One counter argument that can be leveraged is (brauner looks at tycho :))
- // "Hey, you need to write at least one additional tty buffer to make sure that
- // everything that the attached child has written is actually shown."
- // The answer to that is:
- // "This case can only happen if the process has exited and has left data in stdout which
- // would generate a (POLLIN | POLLPRI | POLLHUP | POLLRDHUP) event and this case is already
- // handled and triggers another codepath. (See [2].)"
- if avoidAtomicLoad || atomic.LoadInt32(&attachedChildIsDead) == 1 {
- avoidAtomicLoad = true
-					// Handle the race between atomic.StoreInt32() in the go routine
-					// explained in [1] and atomic.LoadInt32() in the go routine
-					// here:
-					// We need to check for (POLLHUP | POLLRDHUP) here again since we might
-					// still be handling a pure POLLIN event from a write prior to the child's
- // exit. But the child might have exited right before and performed
- // atomic.StoreInt32() to update attachedChildIsDead before we
- // performed our atomic.LoadInt32(). This means we accidentally hit this
- // codepath and are misinformed about the available poll() events. So we
- // need to perform a non-blocking poll() again to exclude that case:
- //
- // - If we detect no (POLLHUP | POLLRDHUP) event we know the child
- // has already exited but someone else is holding stdin/stdout open and
- // writing to it.
-					// Note that this case should only ever be triggered in situations like
- // running a shell and doing stuff like:
- // > ./lxc exec xen1 -- bash
- // root@xen1:~# yes &
- // .
- // .
- // .
- // now send Ctrl+D or type "exit". By the time the Ctrl+D/exit event is
-					// triggered, we will have read all of the child's data it has written to
- // stdout and so we can assume that anything that comes now belongs to
- // the process that is holding stdin/stdout open.
- //
- // - If we detect a (POLLHUP | POLLRDHUP) event we know that we've
- // hit this codepath on accident caused by the race between
- // atomic.StoreInt32() in the go routine explained in [1] and
- // atomic.LoadInt32() in this go routine. So the next call to
- // GetPollRevents() will either return
- // (POLLIN | POLLPRI | POLLERR | POLLHUP | POLLRDHUP)
- // or (POLLHUP | POLLRDHUP). Both will trigger another codepath (See [2].)
- // that takes care that all data of the child that is buffered in
- // stdout is written out.
- ret, revents, err := GetPollRevents(fd, 0, (POLLIN | POLLPRI | POLLERR | POLLHUP | POLLRDHUP | POLLNVAL))
- if ret < 0 {
- logger.Errorf("Failed to poll(POLLIN | POLLPRI | POLLERR | POLLHUP | POLLRDHUP) on file descriptor: %s. Exiting.", err)
- return
- } else if (revents & (POLLHUP | POLLRDHUP | POLLERR | POLLNVAL)) == 0 {
- logger.Debugf("Exiting but background processes are still running.")
- return
- }
- }
- read := buf[offset : offset+readSize]
- nr, err = r.Read(read)
- }
-
- // The attached process has exited and we have read all data that may have
- // been buffered.
- if ((revents & (POLLHUP | POLLRDHUP)) > 0) && !both {
- logger.Debugf("Detected poll(POLLHUP) event: exiting.")
- return
- }
-
- offset += nr
- if offset > 0 && (offset+readSize >= bufferSize || err != nil) {
- ch <- buf[0:offset]
- offset = 0
- buf = make([]byte, bufferSize)
- }
- }
- }()
-
- return ch
-}
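
The close-exactly-once trick in `ExecReaderToChannel` above (two goroutines may race to close the channel) relies on `sync.Once` instead of a mutex. A self-contained sketch of just that mechanism:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	ch := make(chan []byte)
	var once sync.Once
	closeChannel := func() { close(ch) }

	// Both goroutines try to close the channel, as the poll goroutine and
	// the reader goroutine above may; sync.Once makes exactly one succeed.
	go func() { once.Do(closeChannel) }()
	go func() { once.Do(closeChannel) }()

	// A closed channel ends the range loop, which is the consumer-side
	// contract of ExecReaderToChannel.
	for chunk := range ch {
		fmt.Println(chunk)
	}
	fmt.Println("channel closed exactly once")
}
```
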
diff --git a/vendor/github.com/lxc/lxd/shared/util_linux_notcgo.go b/vendor/github.com/lxc/lxd/shared/util_linux_notcgo.go
deleted file mode 100644
index 9f82988c8d69..000000000000
--- a/vendor/github.com/lxc/lxd/shared/util_linux_notcgo.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// +build linux,!cgo
-
-package shared
-
-const ABSTRACT_UNIX_SOCK_LEN int = 107
diff --git a/vendor/github.com/lxc/lxd/shared/util_unix.go b/vendor/github.com/lxc/lxd/shared/util_unix.go
deleted file mode 100644
index f87a109333fa..000000000000
--- a/vendor/github.com/lxc/lxd/shared/util_unix.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !windows
-
-package shared
-
-import (
- "os"
- "syscall"
-)
-
-func GetOwnerMode(fInfo os.FileInfo) (os.FileMode, int, int) {
- mode := fInfo.Mode()
- uid := int(fInfo.Sys().(*syscall.Stat_t).Uid)
- gid := int(fInfo.Sys().(*syscall.Stat_t).Gid)
- return mode, uid, gid
-}
diff --git a/vendor/github.com/lxc/lxd/shared/util_windows.go b/vendor/github.com/lxc/lxd/shared/util_windows.go
deleted file mode 100644
index 7a480f5bc29b..000000000000
--- a/vendor/github.com/lxc/lxd/shared/util_windows.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build windows
-
-package shared
-
-import (
- "os"
-)
-
-func GetOwnerMode(fInfo os.FileInfo) (os.FileMode, int, int) {
- return fInfo.Mode(), -1, -1
-}
diff --git a/vendor/gopkg.in/robfig/cron.v2/.gitignore b/vendor/github.com/soheilhy/cmux/.gitignore
similarity index 94%
rename from vendor/gopkg.in/robfig/cron.v2/.gitignore
rename to vendor/github.com/soheilhy/cmux/.gitignore
index 00268614f045..daf913b1b347 100644
--- a/vendor/gopkg.in/robfig/cron.v2/.gitignore
+++ b/vendor/github.com/soheilhy/cmux/.gitignore
@@ -20,3 +20,5 @@ _cgo_export.*
_testmain.go
*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/soheilhy/cmux/.travis.yml b/vendor/github.com/soheilhy/cmux/.travis.yml
new file mode 100644
index 000000000000..4bc48e0669c0
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/.travis.yml
@@ -0,0 +1,29 @@
+language: go
+
+go:
+ - 1.6
+ - 1.7
+ - 1.8
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+gobuild_args: -race
+
+before_install:
+ - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go get -u github.com/kisielk/errcheck; fi
+ - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go get -u github.com/golang/lint/golint; fi
+
+before_script:
+ - '! gofmt -s -l . | read'
+ - echo $TRAVIS_GO_VERSION
+ - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then golint ./...; fi
+ - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then errcheck ./...; fi
+ - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go tool vet .; fi
+ - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go tool vet --shadow .; fi
+
+script:
+ - go test -bench . -v ./...
+ - go test -race -bench . -v ./...
diff --git a/vendor/github.com/soheilhy/cmux/CONTRIBUTORS b/vendor/github.com/soheilhy/cmux/CONTRIBUTORS
new file mode 100644
index 000000000000..49878f228a12
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/CONTRIBUTORS
@@ -0,0 +1,12 @@
+# The list of people who have contributed code to the cmux repository.
+#
+# Auto-generated with:
+# git log --oneline --pretty=format:'%an <%aE>' | sort -u
+#
+Andreas Jaekle
+Dmitri Shuralyov
+Ethan Mosbaugh
+Soheil Hassas Yeganeh
+Soheil Hassas Yeganeh
+Tamir Duberstein
+Tamir Duberstein
diff --git a/vendor/github.com/soheilhy/cmux/LICENSE b/vendor/github.com/soheilhy/cmux/LICENSE
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/soheilhy/cmux/README.md b/vendor/github.com/soheilhy/cmux/README.md
new file mode 100644
index 000000000000..70306e6ab622
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/README.md
@@ -0,0 +1,83 @@
+# cmux: Connection Mux ![Travis Build Status](https://api.travis-ci.org/soheilhy/cmux.svg?branch=master "Travis Build Status") [![GoDoc](https://godoc.org/github.com/soheilhy/cmux?status.svg)](http://godoc.org/github.com/soheilhy/cmux)
+
+cmux is a generic Go library to multiplex connections based on
+their payload. Using cmux, you can serve gRPC, SSH, HTTPS, HTTP,
+Go RPC, and pretty much any other protocol on the same TCP listener.
+
+## How-To
+Simply create your main listener, create a cmux for that listener,
+and then match connections:
+```go
+// Create the main listener.
+l, err := net.Listen("tcp", ":23456")
+if err != nil {
+ log.Fatal(err)
+}
+
+// Create a cmux.
+m := cmux.New(l)
+
+// Match connections in order:
+// First grpc, then HTTP, and otherwise Go RPC/TCP.
+grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
+httpL := m.Match(cmux.HTTP1Fast())
+trpcL := m.Match(cmux.Any()) // Any means anything that is not yet matched.
+
+// Create your protocol servers.
+grpcS := grpc.NewServer()
+grpchello.RegisterGreeterServer(grpcS, &server{})
+
+httpS := &http.Server{
+ Handler: &helloHTTP1Handler{},
+}
+
+trpcS := rpc.NewServer()
+trpcS.Register(&ExampleRPCRcvr{})
+
+// Use the muxed listeners for your servers.
+go grpcS.Serve(grpcL)
+go httpS.Serve(httpL)
+go trpcS.Accept(trpcL)
+
+// Start serving!
+m.Serve()
+```
+
+Take a look at [other examples in the GoDoc](http://godoc.org/github.com/soheilhy/cmux/#pkg-examples).
+
+## Docs
+* [GoDocs](https://godoc.org/github.com/soheilhy/cmux)
+
+## Performance
+There is room for improvement, but since we are only matching
+the very first bytes of a connection, the performance overhead on
+long-lived connections (i.e., RPCs and pipelined HTTP streams)
+is negligible.
+
+*TODO(soheil)*: Add benchmarks.
+
+## Limitations
+* *TLS*: `net/http` uses a type assertion to identify TLS connections; since
+cmux's lookahead-implementing connection wraps the underlying TLS connection,
+this type assertion fails. Because of that, you can serve HTTPS using cmux,
+but `http.Request.TLS` will not be set in your handlers (see the sketch
+after this file).
+
+* *Different Protocols on The Same Connection*: `cmux` matches the connection
+when it's accepted. For example, one connection can be either gRPC or REST, but
+not both. That is, we assume that a client connection is either used for gRPC
+or REST.
+
+* *Java gRPC Clients*: The Java gRPC client blocks until it receives a SETTINGS
+frame from the server. If you are using the Java client to connect to a cmux'ed
+gRPC server, please match with writers:
+```go
+grpcl := m.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
+```
+
+# Copyright and License
+Copyright 2016 The CMux Authors. All rights reserved.
+
+See [CONTRIBUTORS](https://github.com/soheilhy/cmux/blob/master/CONTRIBUTORS)
+for the CMux Authors. Code is released under
+[the Apache 2 license](https://github.com/soheilhy/cmux/blob/master/LICENSE).
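
A sketch of the workaround the *TLS* limitation above implies: match raw TLS ClientHellos with `cmux.TLS()` and terminate TLS yourself via `tls.NewListener`. `http.Request.TLS` stays nil in handlers, but HTTPS is served. Here `m` and `helloHTTP1Handler` are from the How-To example, and the certificate paths are placeholders:

```go
// Certificate paths are placeholders for illustration.
cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
if err != nil {
	log.Fatal(err)
}

// Terminate TLS on the matched listener ourselves.
tlsL := m.Match(cmux.TLS())
httpsS := &http.Server{Handler: &helloHTTP1Handler{}}
go httpsS.Serve(tls.NewListener(tlsL, &tls.Config{
	Certificates: []tls.Certificate{cert},
}))
```
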
diff --git a/vendor/github.com/soheilhy/cmux/buffer.go b/vendor/github.com/soheilhy/cmux/buffer.go
new file mode 100644
index 000000000000..f8cf30a1e66a
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/buffer.go
@@ -0,0 +1,67 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cmux
+
+import (
+ "bytes"
+ "io"
+)
+
+// bufferedReader is an optimized implementation of io.Reader that behaves like
+// ```
+// io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(source, buffer))
+// ```
+// without allocating.
+type bufferedReader struct {
+ source io.Reader
+ buffer bytes.Buffer
+ bufferRead int
+ bufferSize int
+ sniffing bool
+ lastErr error
+}
+
+func (s *bufferedReader) Read(p []byte) (int, error) {
+ if s.bufferSize > s.bufferRead {
+		// If we have already read something from the buffer before, we return the
+		// same data and the last error, if any. We need to return immediately;
+		// otherwise we may block forever if we try to be smart and call
+		// source.Read() seeking a little more data.
+ bn := copy(p, s.buffer.Bytes()[s.bufferRead:s.bufferSize])
+ s.bufferRead += bn
+ return bn, s.lastErr
+ } else if !s.sniffing && s.buffer.Cap() != 0 {
+ // We don't need the buffer anymore.
+ // Reset it to release the internal slice.
+ s.buffer = bytes.Buffer{}
+ }
+
+ // If there is nothing more to return in the sniffed buffer, read from the
+ // source.
+ sn, sErr := s.source.Read(p)
+ if sn > 0 && s.sniffing {
+ s.lastErr = sErr
+ if wn, wErr := s.buffer.Write(p[:sn]); wErr != nil {
+ return wn, wErr
+ }
+ }
+ return sn, sErr
+}
+
+func (s *bufferedReader) reset(snif bool) {
+ s.sniffing = snif
+ s.bufferRead = 0
+ s.bufferSize = s.buffer.Len()
+}
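
The comment on `bufferedReader` above names the stdlib composition it optimizes away; the composition itself is a useful mental model for the sniff-then-replay flow (stdlib-only sketch):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	source := strings.NewReader("GET / HTTP/1.1\r\n")

	// Sniffing: every byte a matcher reads is also copied into buf.
	var buf bytes.Buffer
	sniffer := io.TeeReader(source, &buf)
	head := make([]byte, 4)
	io.ReadFull(sniffer, head)
	fmt.Printf("matcher sniffed %q\n", head)

	// Replay: the handler sees the sniffed bytes again, then the live stream.
	replay := io.MultiReader(bytes.NewReader(buf.Bytes()), source)
	rest, _ := io.ReadAll(replay)
	fmt.Printf("handler sees %q\n", rest)
}
```
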
diff --git a/vendor/github.com/soheilhy/cmux/cmux.go b/vendor/github.com/soheilhy/cmux/cmux.go
new file mode 100644
index 000000000000..80403423d8a6
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/cmux.go
@@ -0,0 +1,270 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cmux
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "sync"
+ "time"
+)
+
+// Matcher matches a connection based on its content.
+type Matcher func(io.Reader) bool
+
+// MatchWriter is a matcher that can also write a response (say, to do a handshake).
+type MatchWriter func(io.Writer, io.Reader) bool
+
+// ErrorHandler handles an error and returns whether
+// the mux should continue serving the listener.
+type ErrorHandler func(error) bool
+
+var _ net.Error = ErrNotMatched{}
+
+// ErrNotMatched is returned whenever a connection is not matched by any of
+// the matchers registered in the multiplexer.
+type ErrNotMatched struct {
+ c net.Conn
+}
+
+func (e ErrNotMatched) Error() string {
+	return fmt.Sprintf("mux: connection %v not matched by any matcher",
+ e.c.RemoteAddr())
+}
+
+// Temporary implements the net.Error interface.
+func (e ErrNotMatched) Temporary() bool { return true }
+
+// Timeout implements the net.Error interface.
+func (e ErrNotMatched) Timeout() bool { return false }
+
+type errListenerClosed string
+
+func (e errListenerClosed) Error() string { return string(e) }
+func (e errListenerClosed) Temporary() bool { return false }
+func (e errListenerClosed) Timeout() bool { return false }
+
+// ErrListenerClosed is returned from muxListener.Accept when the underlying
+// listener is closed.
+var ErrListenerClosed = errListenerClosed("mux: listener closed")
+
+// for readability of readTimeout
+var noTimeout time.Duration
+
+// New instantiates a new connection multiplexer.
+func New(l net.Listener) CMux {
+ return &cMux{
+ root: l,
+ bufLen: 1024,
+ errh: func(_ error) bool { return true },
+ donec: make(chan struct{}),
+ readTimeout: noTimeout,
+ }
+}
+
+// CMux is a multiplexer for network connections.
+type CMux interface {
+	// Match returns a net.Listener that sees (i.e., accepts) only
+	// the connections matched by at least one of the matchers.
+ //
+ // The order used to call Match determines the priority of matchers.
+ Match(...Matcher) net.Listener
+	// MatchWithWriters returns a net.Listener that accepts only the
+	// connections matched by at least one of the matcher writers.
+ //
+ // Prefer Matchers over MatchWriters, since the latter can write on the
+ // connection before the actual handler.
+ //
+ // The order used to call Match determines the priority of matchers.
+ MatchWithWriters(...MatchWriter) net.Listener
+ // Serve starts multiplexing the listener. Serve blocks and perhaps
+ // should be invoked concurrently within a go routine.
+ Serve() error
+ // HandleError registers an error handler that handles listener errors.
+ HandleError(ErrorHandler)
+	// SetReadTimeout sets a timeout for the reads performed by matchers.
+ SetReadTimeout(time.Duration)
+}
+
+type matchersListener struct {
+ ss []MatchWriter
+ l muxListener
+}
+
+type cMux struct {
+ root net.Listener
+ bufLen int
+ errh ErrorHandler
+ donec chan struct{}
+ sls []matchersListener
+ readTimeout time.Duration
+}
+
+func matchersToMatchWriters(matchers []Matcher) []MatchWriter {
+ mws := make([]MatchWriter, 0, len(matchers))
+ for _, m := range matchers {
+ cm := m
+ mws = append(mws, func(w io.Writer, r io.Reader) bool {
+ return cm(r)
+ })
+ }
+ return mws
+}
+
+func (m *cMux) Match(matchers ...Matcher) net.Listener {
+ mws := matchersToMatchWriters(matchers)
+ return m.MatchWithWriters(mws...)
+}
+
+func (m *cMux) MatchWithWriters(matchers ...MatchWriter) net.Listener {
+ ml := muxListener{
+ Listener: m.root,
+ connc: make(chan net.Conn, m.bufLen),
+ }
+ m.sls = append(m.sls, matchersListener{ss: matchers, l: ml})
+ return ml
+}
+
+func (m *cMux) SetReadTimeout(t time.Duration) {
+ m.readTimeout = t
+}
+
+func (m *cMux) Serve() error {
+ var wg sync.WaitGroup
+
+ defer func() {
+ close(m.donec)
+ wg.Wait()
+
+ for _, sl := range m.sls {
+ close(sl.l.connc)
+ // Drain the connections enqueued for the listener.
+ for c := range sl.l.connc {
+ _ = c.Close()
+ }
+ }
+ }()
+
+ for {
+ c, err := m.root.Accept()
+ if err != nil {
+ if !m.handleErr(err) {
+ return err
+ }
+ continue
+ }
+
+ wg.Add(1)
+ go m.serve(c, m.donec, &wg)
+ }
+}
+
+func (m *cMux) serve(c net.Conn, donec <-chan struct{}, wg *sync.WaitGroup) {
+ defer wg.Done()
+
+ muc := newMuxConn(c)
+ if m.readTimeout > noTimeout {
+ _ = c.SetReadDeadline(time.Now().Add(m.readTimeout))
+ }
+ for _, sl := range m.sls {
+ for _, s := range sl.ss {
+ matched := s(muc.Conn, muc.startSniffing())
+ if matched {
+ muc.doneSniffing()
+ if m.readTimeout > noTimeout {
+ _ = c.SetReadDeadline(time.Time{})
+ }
+ select {
+ case sl.l.connc <- muc:
+ case <-donec:
+ _ = c.Close()
+ }
+ return
+ }
+ }
+ }
+
+ _ = c.Close()
+ err := ErrNotMatched{c: c}
+ if !m.handleErr(err) {
+ _ = m.root.Close()
+ }
+}
+
+func (m *cMux) HandleError(h ErrorHandler) {
+ m.errh = h
+}
+
+func (m *cMux) handleErr(err error) bool {
+ if !m.errh(err) {
+ return false
+ }
+
+ if ne, ok := err.(net.Error); ok {
+ return ne.Temporary()
+ }
+
+ return false
+}
+
+type muxListener struct {
+ net.Listener
+ connc chan net.Conn
+}
+
+func (l muxListener) Accept() (net.Conn, error) {
+ c, ok := <-l.connc
+ if !ok {
+ return nil, ErrListenerClosed
+ }
+ return c, nil
+}
+
+// MuxConn wraps a net.Conn and provides transparent sniffing of connection data.
+type MuxConn struct {
+ net.Conn
+ buf bufferedReader
+}
+
+func newMuxConn(c net.Conn) *MuxConn {
+ return &MuxConn{
+ Conn: c,
+ buf: bufferedReader{source: c},
+ }
+}
+
+// From the io.Reader documentation:
+//
+// When Read encounters an error or end-of-file condition after
+// successfully reading n > 0 bytes, it returns the number of
+// bytes read. It may return the (non-nil) error from the same call
+// or return the error (and n == 0) from a subsequent call.
+// An instance of this general case is that a Reader returning
+// a non-zero number of bytes at the end of the input stream may
+// return either err == EOF or err == nil. The next Read should
+// return 0, EOF.
+func (m *MuxConn) Read(p []byte) (int, error) {
+ return m.buf.Read(p)
+}
+
+func (m *MuxConn) startSniffing() io.Reader {
+ m.buf.reset(true)
+ return &m.buf
+}
+
+func (m *MuxConn) doneSniffing() {
+ m.buf.reset(false)
+}
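
The `HandleError` and `SetReadTimeout` hooks defined above rarely show up in usage examples; a minimal sketch wiring both into the standard serve loop (address and timeout values are arbitrary):

```go
package main

import (
	"log"
	"net"
	"net/http"
	"time"

	"github.com/soheilhy/cmux"
)

func main() {
	l, err := net.Listen("tcp", ":23456")
	if err != nil {
		log.Fatal(err)
	}

	m := cmux.New(l)

	// Give slow clients one second to present matchable bytes.
	m.SetReadTimeout(time.Second)

	// Log unmatched connections but keep serving; returning false here
	// would make the mux stop instead.
	m.HandleError(func(err error) bool {
		log.Printf("cmux: %v", err)
		return true
	})

	httpL := m.Match(cmux.HTTP1Fast())
	go http.Serve(httpL, nil)

	log.Fatal(m.Serve())
}
```
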
diff --git a/vendor/github.com/soheilhy/cmux/doc.go b/vendor/github.com/soheilhy/cmux/doc.go
new file mode 100644
index 000000000000..aaa8f3158998
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+// Package cmux is a library to multiplex network connections based on
+// their payload. Using cmux, you can serve different protocols from the
+// same listener.
+package cmux
diff --git a/vendor/github.com/soheilhy/cmux/matchers.go b/vendor/github.com/soheilhy/cmux/matchers.go
new file mode 100644
index 000000000000..878ae98cc3cc
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/matchers.go
@@ -0,0 +1,267 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cmux
+
+import (
+ "bufio"
+ "crypto/tls"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+)
+
+// Any is a Matcher that matches any connection.
+func Any() Matcher {
+ return func(r io.Reader) bool { return true }
+}
+
+// PrefixMatcher returns a matcher that matches a connection if it
+// starts with any of the strings in strs.
+func PrefixMatcher(strs ...string) Matcher {
+ pt := newPatriciaTreeString(strs...)
+ return pt.matchPrefix
+}
+
+func prefixByteMatcher(list ...[]byte) Matcher {
+ pt := newPatriciaTree(list...)
+ return pt.matchPrefix
+}
+
+var defaultHTTPMethods = []string{
+ "OPTIONS",
+ "GET",
+ "HEAD",
+ "POST",
+ "PUT",
+ "DELETE",
+ "TRACE",
+ "CONNECT",
+}
+
+// HTTP1Fast only matches the methods in the HTTP request.
+//
+// This matcher is very optimistic: even if it returns true, the request may
+// not be a valid HTTP request. If you want a correct but slower HTTP1
+// matcher, use HTTP1 instead.
+func HTTP1Fast(extMethods ...string) Matcher {
+ return PrefixMatcher(append(defaultHTTPMethods, extMethods...)...)
+}
+
+// TLS matches HTTPS requests.
+//
+// By default, any TLS handshake packet is matched. An optional whitelist
+// of versions can be passed in to restrict the matcher, for example:
+// TLS(tls.VersionTLS11, tls.VersionTLS12)
+func TLS(versions ...int) Matcher {
+ if len(versions) == 0 {
+ versions = []int{
+ tls.VersionSSL30,
+ tls.VersionTLS10,
+ tls.VersionTLS11,
+ tls.VersionTLS12,
+ }
+ }
+ prefixes := [][]byte{}
+ for _, v := range versions {
+ prefixes = append(prefixes, []byte{22, byte(v >> 8 & 0xff), byte(v & 0xff)})
+ }
+ return prefixByteMatcher(prefixes...)
+}
+
+const maxHTTPRead = 4096
+
+// HTTP1 parses the first line or up to 4096 bytes of the request to see if
+// the connection contains an HTTP request.
+func HTTP1() Matcher {
+ return func(r io.Reader) bool {
+ br := bufio.NewReader(&io.LimitedReader{R: r, N: maxHTTPRead})
+ l, part, err := br.ReadLine()
+ if err != nil || part {
+ return false
+ }
+
+ _, _, proto, ok := parseRequestLine(string(l))
+ if !ok {
+ return false
+ }
+
+ v, _, ok := http.ParseHTTPVersion(proto)
+ return ok && v == 1
+ }
+}
+
+// grabbed from net/http.
+func parseRequestLine(line string) (method, uri, proto string, ok bool) {
+ s1 := strings.Index(line, " ")
+ s2 := strings.Index(line[s1+1:], " ")
+ if s1 < 0 || s2 < 0 {
+ return
+ }
+ s2 += s1 + 1
+ return line[:s1], line[s1+1 : s2], line[s2+1:], true
+}
+
+// HTTP2 parses the frame header of the first frame to detect whether the
+// connection is an HTTP2 connection.
+func HTTP2() Matcher {
+ return hasHTTP2Preface
+}
+
+// HTTP1HeaderField returns a matcher matching the header fields of the first
+// request of an HTTP 1 connection.
+func HTTP1HeaderField(name, value string) Matcher {
+ return func(r io.Reader) bool {
+ return matchHTTP1Field(r, name, func(gotValue string) bool {
+ return gotValue == value
+ })
+ }
+}
+
+// HTTP1HeaderFieldPrefix returns a matcher matching the header fields of the
+// first request of an HTTP 1 connection. If the header with key name has a
+// value prefixed with valuePrefix, this will match.
+func HTTP1HeaderFieldPrefix(name, valuePrefix string) Matcher {
+ return func(r io.Reader) bool {
+ return matchHTTP1Field(r, name, func(gotValue string) bool {
+ return strings.HasPrefix(gotValue, valuePrefix)
+ })
+ }
+}
+
+// HTTP2HeaderField returns a matcher matching the header fields of the first
+// headers frame.
+func HTTP2HeaderField(name, value string) Matcher {
+ return func(r io.Reader) bool {
+ return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool {
+ return gotValue == value
+ })
+ }
+}
+
+// HTTP2HeaderFieldPrefix returns a matcher matching the header fields of the
+// first headers frame. If the header with key name has a value prefixed with
+// valuePrefix, this will match.
+func HTTP2HeaderFieldPrefix(name, valuePrefix string) Matcher {
+ return func(r io.Reader) bool {
+ return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool {
+ return strings.HasPrefix(gotValue, valuePrefix)
+ })
+ }
+}
+
+// HTTP2MatchHeaderFieldSendSettings matches the header field and writes the
+// settings to the server. Prefer HTTP2HeaderField over this one, if the client
+// does not block on receiving a SETTINGS frame.
+func HTTP2MatchHeaderFieldSendSettings(name, value string) MatchWriter {
+ return func(w io.Writer, r io.Reader) bool {
+ return matchHTTP2Field(w, r, name, func(gotValue string) bool {
+ return gotValue == value
+ })
+ }
+}
+
+// HTTP2MatchHeaderFieldPrefixSendSettings matches the header field prefix
+// and writes the settings to the server. Prefer HTTP2HeaderFieldPrefix over
+// this one, if the client does not block on receiving a SETTINGS frame.
+func HTTP2MatchHeaderFieldPrefixSendSettings(name, valuePrefix string) MatchWriter {
+ return func(w io.Writer, r io.Reader) bool {
+ return matchHTTP2Field(w, r, name, func(gotValue string) bool {
+ return strings.HasPrefix(gotValue, valuePrefix)
+ })
+ }
+}
+
+func hasHTTP2Preface(r io.Reader) bool {
+ var b [len(http2.ClientPreface)]byte
+ last := 0
+
+ for {
+ n, err := r.Read(b[last:])
+ if err != nil {
+ return false
+ }
+
+ last += n
+ eq := string(b[:last]) == http2.ClientPreface[:last]
+ if last == len(http2.ClientPreface) {
+ return eq
+ }
+ if !eq {
+ return false
+ }
+ }
+}
+
+func matchHTTP1Field(r io.Reader, name string, matches func(string) bool) (matched bool) {
+ req, err := http.ReadRequest(bufio.NewReader(r))
+ if err != nil {
+ return false
+ }
+
+ return matches(req.Header.Get(name))
+}
+
+func matchHTTP2Field(w io.Writer, r io.Reader, name string, matches func(string) bool) (matched bool) {
+ if !hasHTTP2Preface(r) {
+ return false
+ }
+
+ done := false
+ framer := http2.NewFramer(w, r)
+ hdec := hpack.NewDecoder(uint32(4<<10), func(hf hpack.HeaderField) {
+ if hf.Name == name {
+ done = true
+ if matches(hf.Value) {
+ matched = true
+ }
+ }
+ })
+ for {
+ f, err := framer.ReadFrame()
+ if err != nil {
+ return false
+ }
+
+ switch f := f.(type) {
+ case *http2.SettingsFrame:
+			// Sender acknowledged the SETTINGS frame. No need to write
+			// SETTINGS again.
+ if f.IsAck() {
+ break
+ }
+ if err := framer.WriteSettings(); err != nil {
+ return false
+ }
+ case *http2.ContinuationFrame:
+ if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil {
+ return false
+ }
+ done = done || f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0
+ case *http2.HeadersFrame:
+ if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil {
+ return false
+ }
+ done = done || f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0
+ }
+
+ if done {
+ return matched
+ }
+ }
+}
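
For orientation (not part of the vendored file): these matchers let a single listener fan out by protocol, which is how an embedded etcd can share one port between gRPC and plain HTTP. A minimal sketch under assumed usage; `serveGRPC` is a hypothetical stand-in for a real gRPC server:

```go
package main

import (
	"net"
	"net/http"

	"github.com/soheilhy/cmux"
)

func main() {
	l, err := net.Listen("tcp", ":8080")
	if err != nil {
		panic(err)
	}
	m := cmux.New(l)
	// gRPC clients send an HTTP/2 HEADERS frame with this content-type.
	// MatchWithWriters answers with a SETTINGS frame, for clients (e.g.
	// grpc-java) that block until they receive one.
	grpcL := m.MatchWithWriters(
		cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
	httpL := m.Match(cmux.Any()) // everything else falls through to HTTP
	go serveGRPC(grpcL)
	go http.Serve(httpL, nil)
	m.Serve()
}

func serveGRPC(l net.Listener) { /* hypothetical: grpcServer.Serve(l) */ }
```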
diff --git a/vendor/github.com/soheilhy/cmux/patricia.go b/vendor/github.com/soheilhy/cmux/patricia.go
new file mode 100644
index 000000000000..c3e3d85bdeaf
--- /dev/null
+++ b/vendor/github.com/soheilhy/cmux/patricia.go
@@ -0,0 +1,179 @@
+// Copyright 2016 The CMux Authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package cmux
+
+import (
+ "bytes"
+ "io"
+)
+
+// patriciaTree is a simple patricia tree that handles []byte instead of string
+// and cannot be changed after instantiation.
+type patriciaTree struct {
+ root *ptNode
+ maxDepth int // max depth of the tree.
+}
+
+func newPatriciaTree(bs ...[]byte) *patriciaTree {
+ max := 0
+ for _, b := range bs {
+ if max < len(b) {
+ max = len(b)
+ }
+ }
+ return &patriciaTree{
+ root: newNode(bs),
+ maxDepth: max + 1,
+ }
+}
+
+func newPatriciaTreeString(strs ...string) *patriciaTree {
+ b := make([][]byte, len(strs))
+ for i, s := range strs {
+ b[i] = []byte(s)
+ }
+ return newPatriciaTree(b...)
+}
+
+func (t *patriciaTree) matchPrefix(r io.Reader) bool {
+ buf := make([]byte, t.maxDepth)
+ n, _ := io.ReadFull(r, buf)
+ return t.root.match(buf[:n], true)
+}
+
+func (t *patriciaTree) match(r io.Reader) bool {
+ buf := make([]byte, t.maxDepth)
+ n, _ := io.ReadFull(r, buf)
+ return t.root.match(buf[:n], false)
+}
+
+type ptNode struct {
+ prefix []byte
+ next map[byte]*ptNode
+ terminal bool
+}
+
+func newNode(strs [][]byte) *ptNode {
+ if len(strs) == 0 {
+ return &ptNode{
+ prefix: []byte{},
+ terminal: true,
+ }
+ }
+
+ if len(strs) == 1 {
+ return &ptNode{
+ prefix: strs[0],
+ terminal: true,
+ }
+ }
+
+ p, strs := splitPrefix(strs)
+ n := &ptNode{
+ prefix: p,
+ }
+
+ nexts := make(map[byte][][]byte)
+ for _, s := range strs {
+ if len(s) == 0 {
+ n.terminal = true
+ continue
+ }
+ nexts[s[0]] = append(nexts[s[0]], s[1:])
+ }
+
+ n.next = make(map[byte]*ptNode)
+ for first, rests := range nexts {
+ n.next[first] = newNode(rests)
+ }
+
+ return n
+}
+
+func splitPrefix(bss [][]byte) (prefix []byte, rest [][]byte) {
+ if len(bss) == 0 || len(bss[0]) == 0 {
+ return prefix, bss
+ }
+
+ if len(bss) == 1 {
+ return bss[0], [][]byte{{}}
+ }
+
+ for i := 0; ; i++ {
+ var cur byte
+ eq := true
+ for j, b := range bss {
+ if len(b) <= i {
+ eq = false
+ break
+ }
+
+ if j == 0 {
+ cur = b[i]
+ continue
+ }
+
+ if cur != b[i] {
+ eq = false
+ break
+ }
+ }
+
+ if !eq {
+ break
+ }
+
+ prefix = append(prefix, cur)
+ }
+
+ rest = make([][]byte, 0, len(bss))
+ for _, b := range bss {
+ rest = append(rest, b[len(prefix):])
+ }
+
+ return prefix, rest
+}
+
+func (n *ptNode) match(b []byte, prefix bool) bool {
+ l := len(n.prefix)
+ if l > 0 {
+ if l > len(b) {
+ l = len(b)
+ }
+ if !bytes.Equal(b[:l], n.prefix) {
+ return false
+ }
+ }
+
+ if n.terminal && (prefix || len(n.prefix) == len(b)) {
+ return true
+ }
+
+ if l >= len(b) {
+ return false
+ }
+
+ nextN, ok := n.next[b[l]]
+ if !ok {
+ return false
+ }
+
+ if l == len(b) {
+ b = b[l:l]
+ } else {
+ b = b[l+1:]
+ }
+ return nextN.match(b, prefix)
+}
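
The patricia tree above is the engine behind cmux's exported `PrefixMatcher`: every candidate prefix becomes a path in one tree, and `matchPrefix` reads at most `maxDepth` bytes from the connection before deciding. A small sketch of that exported API (assumed usage, not part of this diff):

```go
package main

import (
	"net"

	"github.com/soheilhy/cmux"
)

func main() {
	l, err := net.Listen("tcp", ":2222")
	if err != nil {
		panic(err)
	}
	m := cmux.New(l)
	// "SSH-" and the HTTP verbs share one patricia tree; splitPrefix folds
	// their common bytes so matching is a single pass over the input.
	sshL := m.Match(cmux.PrefixMatcher("SSH-"))
	httpL := m.Match(cmux.PrefixMatcher("GET ", "POST ", "PUT "))
	go serve(sshL)
	go serve(httpL)
	m.Serve()
}

func serve(l net.Listener) { /* hypothetical: accept and handle connections */ }
```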
diff --git a/vendor/github.com/tmc/grpc-websocket-proxy/LICENSE b/vendor/github.com/tmc/grpc-websocket-proxy/LICENSE
new file mode 100644
index 000000000000..95d0bc81f18a
--- /dev/null
+++ b/vendor/github.com/tmc/grpc-websocket-proxy/LICENSE
@@ -0,0 +1,7 @@
+Copyright (C) 2016 Travis Cline
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/tmc/grpc-websocket-proxy/wsproxy/doc.go b/vendor/github.com/tmc/grpc-websocket-proxy/wsproxy/doc.go
new file mode 100644
index 000000000000..baf9545e288c
--- /dev/null
+++ b/vendor/github.com/tmc/grpc-websocket-proxy/wsproxy/doc.go
@@ -0,0 +1,2 @@
+// Package wsproxy implements a websocket proxy for grpc-gateway backed services
+package wsproxy
diff --git a/vendor/github.com/tmc/grpc-websocket-proxy/wsproxy/websocket_proxy.go b/vendor/github.com/tmc/grpc-websocket-proxy/wsproxy/websocket_proxy.go
new file mode 100644
index 000000000000..0fca05a008ae
--- /dev/null
+++ b/vendor/github.com/tmc/grpc-websocket-proxy/wsproxy/websocket_proxy.go
@@ -0,0 +1,254 @@
+package wsproxy
+
+import (
+ "bufio"
+ "io"
+ "net/http"
+ "strings"
+
+ "github.com/gorilla/websocket"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/net/context"
+)
+
+// MethodOverrideParam defines the special URL parameter that is translated into the subsequent proxied streaming http request's method.
+//
+// Deprecated: it is preferable to use the Options parameters to WebSocketProxy to supply parameters.
+var MethodOverrideParam = "method"
+
+// TokenCookieName defines the cookie name that is translated to an 'Authorization: Bearer' header in the streaming http request's headers.
+//
+// Deprecated: it is preferable to use the Options parameters to WebSocketProxy to supply parameters.
+var TokenCookieName = "token"
+
+// RequestMutatorFunc can supply an alternate outgoing request.
+type RequestMutatorFunc func(incoming *http.Request, outgoing *http.Request) *http.Request
+
+// Proxy provides websocket transport upgrade to compatible endpoints.
+type Proxy struct {
+ h http.Handler
+ logger Logger
+ methodOverrideParam string
+ tokenCookieName string
+ requestMutator RequestMutatorFunc
+}
+
+// Logger collects log messages.
+type Logger interface {
+ Warnln(...interface{})
+ Debugln(...interface{})
+}
+
+func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !websocket.IsWebSocketUpgrade(r) {
+ p.h.ServeHTTP(w, r)
+ return
+ }
+ p.proxy(w, r)
+}
+
+// Option allows customization of the proxy.
+type Option func(*Proxy)
+
+// WithMethodParamOverride allows specification of the special http parameter that is used in the proxied streaming request.
+func WithMethodParamOverride(param string) Option {
+ return func(p *Proxy) {
+ p.methodOverrideParam = param
+ }
+}
+
+// WithTokenCookieName allows specification of the cookie that is supplied as an upstream 'Authorization: Bearer' http header.
+func WithTokenCookieName(param string) Option {
+ return func(p *Proxy) {
+ p.tokenCookieName = param
+ }
+}
+
+// WithRequestMutator allows a custom RequestMutatorFunc to be supplied.
+func WithRequestMutator(fn RequestMutatorFunc) Option {
+ return func(p *Proxy) {
+ p.requestMutator = fn
+ }
+}
+
+// WithLogger allows a custom Logger to be supplied.
+func WithLogger(logger Logger) Option {
+ return func(p *Proxy) {
+ p.logger = logger
+ }
+}
+
+// WebsocketProxy attempts to expose the underlying handler as a bidi websocket stream with newline-delimited
+// JSON as the content encoding.
+//
+// The HTTP Authorization header is either populated from the Sec-Websocket-Protocol field or by a cookie.
+// The cookie name is specified by the TokenCookieName value.
+//
+// example:
+// Sec-Websocket-Protocol: Bearer, foobar
+// is converted to:
+// Authorization: Bearer foobar
+//
+// The method can be overridden with the MethodOverrideParam query parameter in the requested URL.
+func WebsocketProxy(h http.Handler, opts ...Option) http.Handler {
+ p := &Proxy{
+ h: h,
+ logger: logrus.New(),
+ methodOverrideParam: MethodOverrideParam,
+ tokenCookieName: TokenCookieName,
+ }
+ for _, o := range opts {
+ o(p)
+ }
+ return p
+}
+
+// TODO(tmc): allow modification of upgrader settings?
+var upgrader = websocket.Upgrader{
+ ReadBufferSize: 1024,
+ WriteBufferSize: 1024,
+ CheckOrigin: func(r *http.Request) bool { return true },
+}
+
+func isClosedConnError(err error) bool {
+ str := err.Error()
+ if strings.Contains(str, "use of closed network connection") {
+ return true
+ }
+ return websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway)
+}
+
+func (p *Proxy) proxy(w http.ResponseWriter, r *http.Request) {
+ var responseHeader http.Header
+ // If Sec-WebSocket-Protocol starts with "Bearer", respond in kind.
+ // TODO(tmc): consider customizability/extension point here.
+ if strings.HasPrefix(r.Header.Get("Sec-WebSocket-Protocol"), "Bearer") {
+ responseHeader = http.Header{
+ "Sec-WebSocket-Protocol": []string{"Bearer"},
+ }
+ }
+ conn, err := upgrader.Upgrade(w, r, responseHeader)
+ if err != nil {
+ p.logger.Warnln("error upgrading websocket:", err)
+ return
+ }
+ defer conn.Close()
+
+ ctx, cancelFn := context.WithCancel(context.Background())
+ defer cancelFn()
+
+ requestBodyR, requestBodyW := io.Pipe()
+ request, err := http.NewRequest(r.Method, r.URL.String(), requestBodyR)
+ if err != nil {
+ p.logger.Warnln("error preparing request:", err)
+ return
+ }
+ if swsp := r.Header.Get("Sec-WebSocket-Protocol"); swsp != "" {
+ request.Header.Set("Authorization", strings.Replace(swsp, "Bearer, ", "Bearer ", 1))
+ }
+ // If token cookie is present, populate Authorization header from the cookie instead.
+ if cookie, err := r.Cookie(p.tokenCookieName); err == nil {
+ request.Header.Set("Authorization", "Bearer "+cookie.Value)
+ }
+ if m := r.URL.Query().Get(p.methodOverrideParam); m != "" {
+ request.Method = m
+ }
+
+ if p.requestMutator != nil {
+ request = p.requestMutator(r, request)
+ }
+
+ responseBodyR, responseBodyW := io.Pipe()
+ response := newInMemoryResponseWriter(responseBodyW)
+ go func() {
+ <-ctx.Done()
+ p.logger.Debugln("closing pipes")
+ requestBodyW.CloseWithError(io.EOF)
+ responseBodyW.CloseWithError(io.EOF)
+ response.closed <- true
+ }()
+
+ go func() {
+ defer cancelFn()
+ p.h.ServeHTTP(response, request)
+ }()
+
+ // read loop -- take messages from websocket and write to http request
+ go func() {
+ defer func() {
+ cancelFn()
+ }()
+ for {
+ select {
+ case <-ctx.Done():
+ p.logger.Debugln("read loop done")
+ return
+ default:
+ }
+ p.logger.Debugln("[read] reading from socket.")
+ _, payload, err := conn.ReadMessage()
+ if err != nil {
+ if isClosedConnError(err) {
+ p.logger.Debugln("[read] websocket closed:", err)
+ return
+ }
+ p.logger.Warnln("error reading websocket message:", err)
+ return
+ }
+ p.logger.Debugln("[read] read payload:", string(payload))
+ p.logger.Debugln("[read] writing to requestBody:")
+ n, err := requestBodyW.Write(payload)
+ requestBodyW.Write([]byte("\n"))
+ p.logger.Debugln("[read] wrote to requestBody", n)
+ if err != nil {
+ p.logger.Warnln("[read] error writing message to upstream http server:", err)
+ return
+ }
+ }
+ }()
+ // write loop -- take messages from response and write to websocket
+ scanner := bufio.NewScanner(responseBodyR)
+ for scanner.Scan() {
+ if len(scanner.Bytes()) == 0 {
+ p.logger.Warnln("[write] empty scan", scanner.Err())
+ continue
+ }
+ p.logger.Debugln("[write] scanned", scanner.Text())
+ if err = conn.WriteMessage(websocket.TextMessage, scanner.Bytes()); err != nil {
+ p.logger.Warnln("[write] error writing websocket message:", err)
+ return
+ }
+ }
+ if err := scanner.Err(); err != nil {
+ p.logger.Warnln("scanner err:", err)
+ }
+}
+
+type inMemoryResponseWriter struct {
+ io.Writer
+ header http.Header
+ code int
+ closed chan bool
+}
+
+func newInMemoryResponseWriter(w io.Writer) *inMemoryResponseWriter {
+ return &inMemoryResponseWriter{
+ Writer: w,
+ header: http.Header{},
+ closed: make(chan bool, 1),
+ }
+}
+
+func (w *inMemoryResponseWriter) Write(b []byte) (int, error) {
+ return w.Writer.Write(b)
+}
+func (w *inMemoryResponseWriter) Header() http.Header {
+ return w.header
+}
+func (w *inMemoryResponseWriter) WriteHeader(code int) {
+ w.code = code
+}
+func (w *inMemoryResponseWriter) CloseNotify() <-chan bool {
+ return w.closed
+}
+func (w *inMemoryResponseWriter) Flush() {}
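
For reference, this proxy is normally layered in front of a grpc-gateway mux: each websocket message becomes a line on the streaming request body, and each newline-delimited JSON line of the response becomes a websocket message. A minimal sketch, assuming a plain `http.Handler` in place of a real gateway mux:

```go
package main

import (
	"net/http"

	"github.com/tmc/grpc-websocket-proxy/wsproxy"
)

func main() {
	var gatewayMux http.Handler = http.NewServeMux() // stand-in for a grpc-gateway mux
	handler := wsproxy.WebsocketProxy(
		gatewayMux,
		// The "token" cookie is forwarded upstream as "Authorization: Bearer <value>".
		wsproxy.WithTokenCookieName("token"),
	)
	// Non-websocket requests pass straight through to gatewayMux.
	http.ListenAndServe(":8080", handler)
}
```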
diff --git a/vendor/github.com/flosch/pongo2/.gitignore b/vendor/github.com/xiang90/probing/.gitignore
similarity index 54%
rename from vendor/github.com/flosch/pongo2/.gitignore
rename to vendor/github.com/xiang90/probing/.gitignore
index 89c56c06519a..daf913b1b347 100644
--- a/vendor/github.com/flosch/pongo2/.gitignore
+++ b/vendor/github.com/xiang90/probing/.gitignore
@@ -6,8 +6,6 @@
# Folders
_obj
_test
-.idea
-.vscode
# Architecture specific extensions/prefixes
*.[568vq]
@@ -22,21 +20,5 @@ _cgo_export.*
_testmain.go
*.exe
-
-.project
-EBNF.txt
-test1.tpl
-pongo2_internal_test.go
-tpl-error.out
-/count.out
-/cover.out
-*.swp
-*.iml
-/cpu.out
-/mem.out
-/pongo2.test
-*.error
-/profile
-/coverage.out
-/pongo2_internal_test.ignore
-go.sum
+*.test
+*.prof
diff --git a/vendor/github.com/xiang90/probing/LICENSE b/vendor/github.com/xiang90/probing/LICENSE
new file mode 100644
index 000000000000..cde8b8b05f53
--- /dev/null
+++ b/vendor/github.com/xiang90/probing/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Xiang Li
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/xiang90/probing/README.md b/vendor/github.com/xiang90/probing/README.md
new file mode 100644
index 000000000000..2ff682057ae5
--- /dev/null
+++ b/vendor/github.com/xiang90/probing/README.md
@@ -0,0 +1,39 @@
+## Getting Started
+
+### Install the handler
+
+We first need to serve the probing HTTP handler.
+
+```go
+	http.Handle("/health", probing.NewHandler())
+ err := http.ListenAndServe(":12345", nil)
+ if err != nil {
+ log.Fatal("ListenAndServe: ", err)
+ }
+```
+
+### Start to probe
+
+Now we can start to probe the endpoint.
+
+``` go
+	p := probing.NewProber(nil) // nil transport falls back to http.DefaultTransport
+	id := "example"
+	probingInterval := 5 * time.Second
+	endpoints := []string{"http://example.com:12345/health"}
+	p.AddHTTP(id, probingInterval, endpoints)
+
+ time.Sleep(13 * time.Second)
+	status, _ := p.Status(id)
+ fmt.Printf("Total Probing: %d, Total Loss: %d, Estimated RTT: %v, Estimated Clock Difference: %v\n",
+ status.Total(), status.Loss(), status.SRTT(), status.ClockDiff())
+ // Total Probing: 2, Total Loss: 0, Estimated RTT: 320.771µs, Estimated Clock Difference: -35.869µs
+```
+
+### TODOs:
+
+- TCP probing
+- UDP probing
+- Gossip based probing
+- More accurate RTT estimation
+- More accurate Clock difference estimation
+- Use a clock interface rather than the real clock
diff --git a/vendor/github.com/xiang90/probing/prober.go b/vendor/github.com/xiang90/probing/prober.go
new file mode 100644
index 000000000000..9431c101e338
--- /dev/null
+++ b/vendor/github.com/xiang90/probing/prober.go
@@ -0,0 +1,139 @@
+package probing
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "sync"
+ "time"
+)
+
+var (
+ ErrNotFound = errors.New("probing: id not found")
+ ErrExist = errors.New("probing: id exists")
+)
+
+type Prober interface {
+ AddHTTP(id string, probingInterval time.Duration, endpoints []string) error
+ Remove(id string) error
+ RemoveAll()
+ Reset(id string) error
+ Status(id string) (Status, error)
+}
+
+type prober struct {
+ mu sync.Mutex
+ targets map[string]*status
+ tr http.RoundTripper
+}
+
+func NewProber(tr http.RoundTripper) Prober {
+ p := &prober{targets: make(map[string]*status)}
+ if tr == nil {
+ p.tr = http.DefaultTransport
+ } else {
+ p.tr = tr
+ }
+ return p
+}
+
+func (p *prober) AddHTTP(id string, probingInterval time.Duration, endpoints []string) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if _, ok := p.targets[id]; ok {
+ return ErrExist
+ }
+
+ s := &status{stopC: make(chan struct{})}
+ p.targets[id] = s
+
+ ticker := time.NewTicker(probingInterval)
+
+ go func() {
+ pinned := 0
+ for {
+ select {
+ case <-ticker.C:
+ start := time.Now()
+ req, err := http.NewRequest("GET", endpoints[pinned], nil)
+ if err != nil {
+ panic(err)
+ }
+ resp, err := p.tr.RoundTrip(req)
+ if err == nil && resp.StatusCode != http.StatusOK {
+ err = fmt.Errorf("got unexpected HTTP status code %s from %s", resp.Status, endpoints[pinned])
+ resp.Body.Close()
+ }
+ if err != nil {
+ s.recordFailure(err)
+ pinned = (pinned + 1) % len(endpoints)
+ continue
+ }
+
+ var hh Health
+ d := json.NewDecoder(resp.Body)
+ err = d.Decode(&hh)
+ resp.Body.Close()
+ if err != nil || !hh.OK {
+ s.recordFailure(err)
+ pinned = (pinned + 1) % len(endpoints)
+ continue
+ }
+
+ s.record(time.Since(start), hh.Now)
+ case <-s.stopC:
+ ticker.Stop()
+ return
+ }
+ }
+ }()
+
+ return nil
+}
+
+func (p *prober) Remove(id string) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ s, ok := p.targets[id]
+ if !ok {
+ return ErrNotFound
+ }
+ close(s.stopC)
+ delete(p.targets, id)
+ return nil
+}
+
+func (p *prober) RemoveAll() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ for _, s := range p.targets {
+ close(s.stopC)
+ }
+ p.targets = make(map[string]*status)
+}
+
+func (p *prober) Reset(id string) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ s, ok := p.targets[id]
+ if !ok {
+ return ErrNotFound
+ }
+ s.reset()
+ return nil
+}
+
+func (p *prober) Status(id string) (Status, error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ s, ok := p.targets[id]
+ if !ok {
+ return nil, ErrNotFound
+ }
+ return s, nil
+}
diff --git a/vendor/github.com/xiang90/probing/server.go b/vendor/github.com/xiang90/probing/server.go
new file mode 100644
index 000000000000..0e7b797d25ac
--- /dev/null
+++ b/vendor/github.com/xiang90/probing/server.go
@@ -0,0 +1,25 @@
+package probing
+
+import (
+ "encoding/json"
+ "net/http"
+ "time"
+)
+
+func NewHandler() http.Handler {
+ return &httpHealth{}
+}
+
+type httpHealth struct {
+}
+
+type Health struct {
+ OK bool
+ Now time.Time
+}
+
+func (h *httpHealth) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ health := Health{OK: true, Now: time.Now()}
+ e := json.NewEncoder(w)
+ e.Encode(health)
+}
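
The handler just JSON-encodes a `Health` value, so a probe observes a body shaped like the output below; the prober decodes it, checks `OK`, and derives the clock difference from `Now`. A quick sketch (timestamp illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/xiang90/probing"
)

func main() {
	h := probing.Health{OK: true, Now: time.Now()}
	b, _ := json.Marshal(h)
	fmt.Println(string(b)) // e.g. {"OK":true,"Now":"2020-05-01T12:00:00Z"}
}
```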
diff --git a/vendor/github.com/xiang90/probing/status.go b/vendor/github.com/xiang90/probing/status.go
new file mode 100644
index 000000000000..bb5f6599fc80
--- /dev/null
+++ b/vendor/github.com/xiang90/probing/status.go
@@ -0,0 +1,108 @@
+package probing
+
+import (
+ "sync"
+ "time"
+)
+
+var (
+ // weight factor
+ α = 0.125
+)
+
+type Status interface {
+ Total() int64
+ Loss() int64
+ Health() bool
+ Err() error
+ // Estimated smoothed round trip time
+ SRTT() time.Duration
+ // Estimated clock difference
+ ClockDiff() time.Duration
+ StopNotify() <-chan struct{}
+}
+
+type status struct {
+ mu sync.Mutex
+ srtt time.Duration
+ total int64
+ loss int64
+ health bool
+ err error
+ clockdiff time.Duration
+ stopC chan struct{}
+}
+
+// SRTT = (1-α) * SRTT + α * RTT
+func (s *status) SRTT() time.Duration {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.srtt
+}
+
+func (s *status) Total() int64 {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.total
+}
+
+func (s *status) Loss() int64 {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.loss
+}
+
+func (s *status) Health() bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.health
+}
+
+func (s *status) Err() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.err
+}
+
+func (s *status) ClockDiff() time.Duration {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.clockdiff
+}
+
+func (s *status) StopNotify() <-chan struct{} {
+ return s.stopC
+}
+
+func (s *status) record(rtt time.Duration, when time.Time) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.total += 1
+ s.health = true
+ s.srtt = time.Duration((1-α)*float64(s.srtt) + α*float64(rtt))
+ s.clockdiff = time.Now().Sub(when) - s.srtt/2
+ s.err = nil
+}
+
+func (s *status) recordFailure(err error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.total++
+ s.health = false
+ s.loss += 1
+ s.err = err
+}
+
+func (s *status) reset() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.srtt = 0
+ s.total = 0
+ s.loss = 0
+ s.health = false
+ s.clockdiff = 0
+ s.err = nil
+}
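
`record` applies a classic exponentially weighted moving average with α = 0.125, so one slow probe nudges the estimate rather than replacing it. A worked sketch of the arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const α = 0.125
	srtt := 100 * time.Millisecond
	rtt := 500 * time.Millisecond // a single slow probe
	srtt = time.Duration((1-α)*float64(srtt) + α*float64(rtt))
	fmt.Println(srtt) // 150ms = 0.875*100ms + 0.125*500ms
}
```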
diff --git a/vendor/go.etcd.io/etcd/auth/doc.go b/vendor/go.etcd.io/etcd/auth/doc.go
new file mode 100644
index 000000000000..72741a107745
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/auth/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package auth provides client role authentication for accessing keys in etcd.
+package auth
diff --git a/vendor/go.etcd.io/etcd/auth/jwt.go b/vendor/go.etcd.io/etcd/auth/jwt.go
new file mode 100644
index 000000000000..52cafe4aafb1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/auth/jwt.go
@@ -0,0 +1,184 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "errors"
+ "time"
+
+ jwt "github.com/dgrijalva/jwt-go"
+ "go.uber.org/zap"
+)
+
+type tokenJWT struct {
+ lg *zap.Logger
+ signMethod jwt.SigningMethod
+ key interface{}
+ ttl time.Duration
+ verifyOnly bool
+}
+
+func (t *tokenJWT) enable() {}
+func (t *tokenJWT) disable() {}
+func (t *tokenJWT) invalidateUser(string) {}
+func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil }
+
+func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
+	// rev isn't used in JWT; it is only used in simple tokens
+ var (
+ username string
+ revision uint64
+ )
+
+ parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
+ if token.Method.Alg() != t.signMethod.Alg() {
+ return nil, errors.New("invalid signing method")
+ }
+ switch k := t.key.(type) {
+ case *rsa.PrivateKey:
+ return &k.PublicKey, nil
+ case *ecdsa.PrivateKey:
+ return &k.PublicKey, nil
+ default:
+ return t.key, nil
+ }
+ })
+
+ if err != nil {
+ if t.lg != nil {
+ t.lg.Warn(
+ "failed to parse a JWT token",
+ zap.String("token", token),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("failed to parse jwt token: %s", err)
+ }
+ return nil, false
+ }
+
+ claims, ok := parsed.Claims.(jwt.MapClaims)
+ if !parsed.Valid || !ok {
+ if t.lg != nil {
+ t.lg.Warn("invalid JWT token", zap.String("token", token))
+ } else {
+ plog.Warningf("invalid jwt token: %s", token)
+ }
+ return nil, false
+ }
+
+ username = claims["username"].(string)
+ revision = uint64(claims["revision"].(float64))
+
+ return &AuthInfo{Username: username, Revision: revision}, true
+}
+
+func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {
+ if t.verifyOnly {
+ return "", ErrVerifyOnly
+ }
+
+	// Future work: letting a JWT token include permission information would be
+	// useful for permission checking on the proxy side.
+ tk := jwt.NewWithClaims(t.signMethod,
+ jwt.MapClaims{
+ "username": username,
+ "revision": revision,
+ "exp": time.Now().Add(t.ttl).Unix(),
+ })
+
+ token, err := tk.SignedString(t.key)
+ if err != nil {
+ if t.lg != nil {
+ t.lg.Debug(
+ "failed to sign a JWT token",
+ zap.String("user-name", username),
+ zap.Uint64("revision", revision),
+ zap.Error(err),
+ )
+ } else {
+ plog.Debugf("failed to sign jwt token: %s", err)
+ }
+ return "", err
+ }
+
+ if t.lg != nil {
+ t.lg.Debug(
+ "created/assigned a new JWT token",
+ zap.String("user-name", username),
+ zap.Uint64("revision", revision),
+ zap.String("token", token),
+ )
+ } else {
+ plog.Debugf("jwt token: %s", token)
+ }
+ return token, err
+}
+
+func newTokenProviderJWT(lg *zap.Logger, optMap map[string]string) (*tokenJWT, error) {
+ var err error
+ var opts jwtOptions
+ err = opts.ParseWithDefaults(optMap)
+ if err != nil {
+ if lg != nil {
+ lg.Error("problem loading JWT options", zap.Error(err))
+ } else {
+ plog.Errorf("problem loading JWT options: %s", err)
+ }
+ return nil, ErrInvalidAuthOpts
+ }
+
+ var keys = make([]string, 0, len(optMap))
+ for k := range optMap {
+ if !knownOptions[k] {
+ keys = append(keys, k)
+ }
+ }
+ if len(keys) > 0 {
+ if lg != nil {
+ lg.Warn("unknown JWT options", zap.Strings("keys", keys))
+ } else {
+ plog.Warningf("unknown JWT options: %v", keys)
+ }
+ }
+
+ key, err := opts.Key()
+ if err != nil {
+ return nil, err
+ }
+
+ t := &tokenJWT{
+ lg: lg,
+ ttl: opts.TTL,
+ signMethod: opts.SignMethod,
+ key: key,
+ }
+
+ switch t.signMethod.(type) {
+ case *jwt.SigningMethodECDSA:
+ if _, ok := t.key.(*ecdsa.PublicKey); ok {
+ t.verifyOnly = true
+ }
+ case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS:
+ if _, ok := t.key.(*rsa.PublicKey); ok {
+ t.verifyOnly = true
+ }
+ }
+
+ return t, nil
+}
diff --git a/vendor/go.etcd.io/etcd/auth/metrics.go b/vendor/go.etcd.io/etcd/auth/metrics.go
new file mode 100644
index 000000000000..fe0d28e22d50
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/auth/metrics.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "sync"
+)
+
+var (
+ currentAuthRevision = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "auth",
+ Name: "revision",
+ Help: "The current revision of auth store.",
+ },
+ func() float64 {
+ reportCurrentAuthRevMu.RLock()
+ defer reportCurrentAuthRevMu.RUnlock()
+ return reportCurrentAuthRev()
+ },
+ )
+ // overridden by auth store initialization
+ reportCurrentAuthRevMu sync.RWMutex
+ reportCurrentAuthRev = func() float64 { return 0 }
+)
+
+func init() {
+ prometheus.MustRegister(currentAuthRevision)
+}
diff --git a/vendor/go.etcd.io/etcd/auth/nop.go b/vendor/go.etcd.io/etcd/auth/nop.go
new file mode 100644
index 000000000000..d4378747bd8b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/auth/nop.go
@@ -0,0 +1,35 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "context"
+)
+
+type tokenNop struct{}
+
+func (t *tokenNop) enable() {}
+func (t *tokenNop) disable() {}
+func (t *tokenNop) invalidateUser(string) {}
+func (t *tokenNop) genTokenPrefix() (string, error) { return "", nil }
+func (t *tokenNop) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
+ return nil, false
+}
+func (t *tokenNop) assign(ctx context.Context, username string, revision uint64) (string, error) {
+ return "", ErrAuthFailed
+}
+func newTokenProviderNop() (*tokenNop, error) {
+ return &tokenNop{}, nil
+}
diff --git a/vendor/go.etcd.io/etcd/auth/options.go b/vendor/go.etcd.io/etcd/auth/options.go
new file mode 100644
index 000000000000..f40b92de6b39
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/auth/options.go
@@ -0,0 +1,192 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "fmt"
+ "io/ioutil"
+ "time"
+
+ jwt "github.com/dgrijalva/jwt-go"
+)
+
+const (
+ optSignMethod = "sign-method"
+ optPublicKey = "pub-key"
+ optPrivateKey = "priv-key"
+ optTTL = "ttl"
+)
+
+var knownOptions = map[string]bool{
+ optSignMethod: true,
+ optPublicKey: true,
+ optPrivateKey: true,
+ optTTL: true,
+}
+
+var (
+ // DefaultTTL will be used when a 'ttl' is not specified
+ DefaultTTL = 5 * time.Minute
+)
+
+type jwtOptions struct {
+ SignMethod jwt.SigningMethod
+ PublicKey []byte
+ PrivateKey []byte
+ TTL time.Duration
+}
+
+// ParseWithDefaults will load options from the specified map or set defaults where appropriate
+func (opts *jwtOptions) ParseWithDefaults(optMap map[string]string) error {
+ if opts.TTL == 0 && optMap[optTTL] == "" {
+ opts.TTL = DefaultTTL
+ }
+
+ return opts.Parse(optMap)
+}
+
+// Parse will load options from the specified map
+func (opts *jwtOptions) Parse(optMap map[string]string) error {
+ var err error
+ if ttl := optMap[optTTL]; ttl != "" {
+ opts.TTL, err = time.ParseDuration(ttl)
+ if err != nil {
+ return err
+ }
+ }
+
+ if file := optMap[optPublicKey]; file != "" {
+ opts.PublicKey, err = ioutil.ReadFile(file)
+ if err != nil {
+ return err
+ }
+ }
+
+ if file := optMap[optPrivateKey]; file != "" {
+ opts.PrivateKey, err = ioutil.ReadFile(file)
+ if err != nil {
+ return err
+ }
+ }
+
+ // signing method is a required field
+ method := optMap[optSignMethod]
+ opts.SignMethod = jwt.GetSigningMethod(method)
+ if opts.SignMethod == nil {
+ return ErrInvalidAuthMethod
+ }
+
+ return nil
+}
+
+// Key will parse and return the appropriately typed key for the selected signature method
+func (opts *jwtOptions) Key() (interface{}, error) {
+ switch opts.SignMethod.(type) {
+ case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS:
+ return opts.rsaKey()
+ case *jwt.SigningMethodECDSA:
+ return opts.ecKey()
+ case *jwt.SigningMethodHMAC:
+ return opts.hmacKey()
+ default:
+ return nil, fmt.Errorf("unsupported signing method: %T", opts.SignMethod)
+ }
+}
+
+func (opts *jwtOptions) hmacKey() (interface{}, error) {
+ if len(opts.PrivateKey) == 0 {
+ return nil, ErrMissingKey
+ }
+ return opts.PrivateKey, nil
+}
+
+func (opts *jwtOptions) rsaKey() (interface{}, error) {
+ var (
+ priv *rsa.PrivateKey
+ pub *rsa.PublicKey
+ err error
+ )
+
+ if len(opts.PrivateKey) > 0 {
+ priv, err = jwt.ParseRSAPrivateKeyFromPEM(opts.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(opts.PublicKey) > 0 {
+ pub, err = jwt.ParseRSAPublicKeyFromPEM(opts.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if priv == nil {
+ if pub == nil {
+ // Neither key given
+ return nil, ErrMissingKey
+ }
+ // Public key only, can verify tokens
+ return pub, nil
+ }
+
+ // both keys provided, make sure they match
+ if pub != nil && pub.E != priv.E && pub.N.Cmp(priv.N) != 0 {
+ return nil, ErrKeyMismatch
+ }
+
+ return priv, nil
+}
+
+func (opts *jwtOptions) ecKey() (interface{}, error) {
+ var (
+ priv *ecdsa.PrivateKey
+ pub *ecdsa.PublicKey
+ err error
+ )
+
+ if len(opts.PrivateKey) > 0 {
+ priv, err = jwt.ParseECPrivateKeyFromPEM(opts.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(opts.PublicKey) > 0 {
+ pub, err = jwt.ParseECPublicKeyFromPEM(opts.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if priv == nil {
+ if pub == nil {
+ // Neither key given
+ return nil, ErrMissingKey
+ }
+ // Public key only, can verify tokens
+ return pub, nil
+ }
+
+ // both keys provided, make sure they match
+ if pub != nil && pub.Curve != priv.Curve &&
+ pub.X.Cmp(priv.X) != 0 && pub.Y.Cmp(priv.Y) != 0 {
+ return nil, ErrKeyMismatch
+ }
+
+ return priv, nil
+}
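
These four keys mirror what etcd accepts after the `jwt,` prefix of its `--auth-token` flag. A sketch of the option map that `ParseWithDefaults` consumes; the file paths are assumptions:

```go
package main

import "fmt"

func main() {
	// Roughly: --auth-token=jwt,sign-method=RS256,pub-key=...,priv-key=...,ttl=10m
	optMap := map[string]string{
		"sign-method": "RS256",
		"pub-key":     "/etc/etcd/jwt_rsa.pub", // assumed path
		"priv-key":    "/etc/etcd/jwt_rsa",     // assumed path
		"ttl":         "10m",                   // omitted => DefaultTTL (5 minutes)
	}
	fmt.Println(optMap)
}
```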
diff --git a/vendor/go.etcd.io/etcd/auth/range_perm_cache.go b/vendor/go.etcd.io/etcd/auth/range_perm_cache.go
new file mode 100644
index 000000000000..7b6c182409c7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/auth/range_perm_cache.go
@@ -0,0 +1,153 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "go.etcd.io/etcd/auth/authpb"
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.etcd.io/etcd/pkg/adt"
+
+ "go.uber.org/zap"
+)
+
+func getMergedPerms(lg *zap.Logger, tx backend.BatchTx, userName string) *unifiedRangePermissions {
+ user := getUser(lg, tx, userName)
+ if user == nil {
+ return nil
+ }
+
+ readPerms := adt.NewIntervalTree()
+ writePerms := adt.NewIntervalTree()
+
+ for _, roleName := range user.Roles {
+ role := getRole(tx, roleName)
+ if role == nil {
+ continue
+ }
+
+ for _, perm := range role.KeyPermission {
+ var ivl adt.Interval
+ var rangeEnd []byte
+
+ if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 {
+ rangeEnd = perm.RangeEnd
+ }
+
+ if len(perm.RangeEnd) != 0 {
+ ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd)
+ } else {
+ ivl = adt.NewBytesAffinePoint(perm.Key)
+ }
+
+ switch perm.PermType {
+ case authpb.READWRITE:
+ readPerms.Insert(ivl, struct{}{})
+ writePerms.Insert(ivl, struct{}{})
+
+ case authpb.READ:
+ readPerms.Insert(ivl, struct{}{})
+
+ case authpb.WRITE:
+ writePerms.Insert(ivl, struct{}{})
+ }
+ }
+ }
+
+ return &unifiedRangePermissions{
+ readPerms: readPerms,
+ writePerms: writePerms,
+ }
+}
+
+func checkKeyInterval(
+ lg *zap.Logger,
+ cachedPerms *unifiedRangePermissions,
+ key, rangeEnd []byte,
+ permtyp authpb.Permission_Type) bool {
+ if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
+ rangeEnd = nil
+ }
+
+ ivl := adt.NewBytesAffineInterval(key, rangeEnd)
+ switch permtyp {
+ case authpb.READ:
+ return cachedPerms.readPerms.Contains(ivl)
+ case authpb.WRITE:
+ return cachedPerms.writePerms.Contains(ivl)
+ default:
+ if lg != nil {
+ lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String()))
+ } else {
+ plog.Panicf("unknown auth type: %v", permtyp)
+ }
+ }
+ return false
+}
+
+func checkKeyPoint(lg *zap.Logger, cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool {
+ pt := adt.NewBytesAffinePoint(key)
+ switch permtyp {
+ case authpb.READ:
+ return cachedPerms.readPerms.Intersects(pt)
+ case authpb.WRITE:
+ return cachedPerms.writePerms.Intersects(pt)
+ default:
+ if lg != nil {
+ lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String()))
+ } else {
+ plog.Panicf("unknown auth type: %v", permtyp)
+ }
+ }
+ return false
+}
+
+func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
+ // assumption: tx is Lock()ed
+ _, ok := as.rangePermCache[userName]
+ if !ok {
+ perms := getMergedPerms(as.lg, tx, userName)
+ if perms == nil {
+ if as.lg != nil {
+ as.lg.Warn(
+ "failed to create a merged permission",
+ zap.String("user-name", userName),
+ )
+ } else {
+ plog.Errorf("failed to create a unified permission of user %s", userName)
+ }
+ return false
+ }
+ as.rangePermCache[userName] = perms
+ }
+
+ if len(rangeEnd) == 0 {
+ return checkKeyPoint(as.lg, as.rangePermCache[userName], key, permtyp)
+ }
+
+ return checkKeyInterval(as.lg, as.rangePermCache[userName], key, rangeEnd, permtyp)
+}
+
+func (as *authStore) clearCachedPerm() {
+ as.rangePermCache = make(map[string]*unifiedRangePermissions)
+}
+
+func (as *authStore) invalidateCachedPerm(userName string) {
+ delete(as.rangePermCache, userName)
+}
+
+type unifiedRangePermissions struct {
+ readPerms adt.IntervalTree
+ writePerms adt.IntervalTree
+}
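
The cache reduces every permission check to an interval-tree query: each granted key range becomes an interval, a range request must be contained in the tree, and a single-key request only needs to intersect it. A sketch against the `adt` package this file imports:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/adt"
)

func main() {
	perms := adt.NewIntervalTree()
	// A grant on ["foo", "fop") covers every key with prefix "foo".
	perms.Insert(adt.NewBytesAffineInterval([]byte("foo"), []byte("fop")), struct{}{})

	// Range request ["foo1", "foo9") lies inside the grant (checkKeyInterval path).
	fmt.Println(perms.Contains(adt.NewBytesAffineInterval([]byte("foo1"), []byte("foo9")))) // true

	// Single-key request uses point intersection (checkKeyPoint path).
	fmt.Println(perms.Intersects(adt.NewBytesAffinePoint([]byte("foo5")))) // true
}
```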
diff --git a/vendor/go.etcd.io/etcd/auth/simple_token.go b/vendor/go.etcd.io/etcd/auth/simple_token.go
new file mode 100644
index 000000000000..934978c9857e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/auth/simple_token.go
@@ -0,0 +1,243 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+// CAUTION: This random-number-based token mechanism is only for testing purposes.
+// A JWT-based mechanism will be added in the near future.
+
+import (
+ "context"
+ "crypto/rand"
+ "fmt"
+ "math/big"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+)
+
+const (
+ letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ defaultSimpleTokenLength = 16
+)
+
+// var for testing purposes
+var (
+ simpleTokenTTL = 5 * time.Minute
+ simpleTokenTTLResolution = 1 * time.Second
+)
+
+type simpleTokenTTLKeeper struct {
+ tokens map[string]time.Time
+ donec chan struct{}
+ stopc chan struct{}
+ deleteTokenFunc func(string)
+ mu *sync.Mutex
+}
+
+func (tm *simpleTokenTTLKeeper) stop() {
+ select {
+ case tm.stopc <- struct{}{}:
+ case <-tm.donec:
+ }
+ <-tm.donec
+}
+
+func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
+ tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+}
+
+func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
+ if _, ok := tm.tokens[token]; ok {
+ tm.tokens[token] = time.Now().Add(simpleTokenTTL)
+ }
+}
+
+func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
+ delete(tm.tokens, token)
+}
+
+func (tm *simpleTokenTTLKeeper) run() {
+ tokenTicker := time.NewTicker(simpleTokenTTLResolution)
+ defer func() {
+ tokenTicker.Stop()
+ close(tm.donec)
+ }()
+ for {
+ select {
+ case <-tokenTicker.C:
+ nowtime := time.Now()
+ tm.mu.Lock()
+ for t, tokenendtime := range tm.tokens {
+ if nowtime.After(tokenendtime) {
+ tm.deleteTokenFunc(t)
+ delete(tm.tokens, t)
+ }
+ }
+ tm.mu.Unlock()
+ case <-tm.stopc:
+ return
+ }
+ }
+}
+
+type tokenSimple struct {
+ lg *zap.Logger
+ indexWaiter func(uint64) <-chan struct{}
+ simpleTokenKeeper *simpleTokenTTLKeeper
+ simpleTokensMu sync.Mutex
+ simpleTokens map[string]string // token -> username
+}
+
+func (t *tokenSimple) genTokenPrefix() (string, error) {
+ ret := make([]byte, defaultSimpleTokenLength)
+
+ for i := 0; i < defaultSimpleTokenLength; i++ {
+ bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
+ if err != nil {
+ return "", err
+ }
+
+ ret[i] = letters[bInt.Int64()]
+ }
+
+ return string(ret), nil
+}
+
+func (t *tokenSimple) assignSimpleTokenToUser(username, token string) {
+ t.simpleTokensMu.Lock()
+ defer t.simpleTokensMu.Unlock()
+ if t.simpleTokenKeeper == nil {
+ return
+ }
+
+ _, ok := t.simpleTokens[token]
+ if ok {
+ if t.lg != nil {
+ t.lg.Panic(
+ "failed to assign already-used simple token to a user",
+ zap.String("user-name", username),
+ zap.String("token", token),
+ )
+ } else {
+ plog.Panicf("token %s is already used", token)
+ }
+ }
+
+ t.simpleTokens[token] = username
+ t.simpleTokenKeeper.addSimpleToken(token)
+}
+
+func (t *tokenSimple) invalidateUser(username string) {
+ if t.simpleTokenKeeper == nil {
+ return
+ }
+ t.simpleTokensMu.Lock()
+ for token, name := range t.simpleTokens {
+ if name == username {
+ delete(t.simpleTokens, token)
+ t.simpleTokenKeeper.deleteSimpleToken(token)
+ }
+ }
+ t.simpleTokensMu.Unlock()
+}
+
+func (t *tokenSimple) enable() {
+ delf := func(tk string) {
+ if username, ok := t.simpleTokens[tk]; ok {
+ if t.lg != nil {
+ t.lg.Info(
+ "deleted a simple token",
+ zap.String("user-name", username),
+ zap.String("token", tk),
+ )
+ } else {
+ plog.Infof("deleting token %s for user %s", tk, username)
+ }
+ delete(t.simpleTokens, tk)
+ }
+ }
+ t.simpleTokenKeeper = &simpleTokenTTLKeeper{
+ tokens: make(map[string]time.Time),
+ donec: make(chan struct{}),
+ stopc: make(chan struct{}),
+ deleteTokenFunc: delf,
+ mu: &t.simpleTokensMu,
+ }
+ go t.simpleTokenKeeper.run()
+}
+
+func (t *tokenSimple) disable() {
+ t.simpleTokensMu.Lock()
+ tk := t.simpleTokenKeeper
+ t.simpleTokenKeeper = nil
+ t.simpleTokens = make(map[string]string) // invalidate all tokens
+ t.simpleTokensMu.Unlock()
+ if tk != nil {
+ tk.stop()
+ }
+}
+
+func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) {
+ if !t.isValidSimpleToken(ctx, token) {
+ return nil, false
+ }
+ t.simpleTokensMu.Lock()
+ username, ok := t.simpleTokens[token]
+ if ok && t.simpleTokenKeeper != nil {
+ t.simpleTokenKeeper.resetSimpleToken(token)
+ }
+ t.simpleTokensMu.Unlock()
+ return &AuthInfo{Username: username, Revision: revision}, ok
+}
+
+func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) {
+	// rev isn't used in simple tokens; it is only used in JWT
+ index := ctx.Value(AuthenticateParamIndex{}).(uint64)
+ simpleTokenPrefix := ctx.Value(AuthenticateParamSimpleTokenPrefix{}).(string)
+ token := fmt.Sprintf("%s.%d", simpleTokenPrefix, index)
+ t.assignSimpleTokenToUser(username, token)
+
+ return token, nil
+}
+
+func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool {
+ splitted := strings.Split(token, ".")
+ if len(splitted) != 2 {
+ return false
+ }
+ index, err := strconv.Atoi(splitted[1])
+ if err != nil {
+ return false
+ }
+
+ select {
+ case <-t.indexWaiter(uint64(index)):
+ return true
+ case <-ctx.Done():
+ }
+
+ return false
+}
+
+func newTokenProviderSimple(lg *zap.Logger, indexWaiter func(uint64) <-chan struct{}) *tokenSimple {
+ return &tokenSimple{
+ lg: lg,
+ simpleTokens: make(map[string]string),
+ indexWaiter: indexWaiter,
+ }
+}
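
Putting the pieces together: a simple token is `<16 random letters>.<applied raft index>`. `assign` builds it, `isValidSimpleToken` waits until that index has been applied, and every successful `info` call resets the 5-minute TTL. An illustration with made-up values:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	token := "abcdefghijklmnop.42" // <prefix from genTokenPrefix>.<raft index>
	parts := strings.Split(token, ".")
	index, _ := strconv.Atoi(parts[1])
	fmt.Println(parts[0], index) // the validator then waits on indexWaiter(42)
}
```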
diff --git a/vendor/go.etcd.io/etcd/auth/store.go b/vendor/go.etcd.io/etcd/auth/store.go
new file mode 100644
index 000000000000..bf3e474bf803
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/auth/store.go
@@ -0,0 +1,1486 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "errors"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "go.etcd.io/etcd/auth/authpb"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/mvcc/backend"
+
+ "github.com/coreos/pkg/capnslog"
+ "go.uber.org/zap"
+ "golang.org/x/crypto/bcrypt"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+)
+
+var (
+ enableFlagKey = []byte("authEnabled")
+ authEnabled = []byte{1}
+ authDisabled = []byte{0}
+
+ revisionKey = []byte("authRevision")
+
+ authBucketName = []byte("auth")
+ authUsersBucketName = []byte("authUsers")
+ authRolesBucketName = []byte("authRoles")
+
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "auth")
+
+ ErrRootUserNotExist = errors.New("auth: root user does not exist")
+ ErrRootRoleNotExist = errors.New("auth: root user does not have root role")
+ ErrUserAlreadyExist = errors.New("auth: user already exists")
+ ErrUserEmpty = errors.New("auth: user name is empty")
+ ErrUserNotFound = errors.New("auth: user not found")
+ ErrRoleAlreadyExist = errors.New("auth: role already exists")
+ ErrRoleNotFound = errors.New("auth: role not found")
+ ErrRoleEmpty = errors.New("auth: role name is empty")
+ ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password")
+ ErrPermissionDenied = errors.New("auth: permission denied")
+ ErrRoleNotGranted = errors.New("auth: role is not granted to the user")
+ ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
+ ErrAuthNotEnabled = errors.New("auth: authentication is not enabled")
+ ErrAuthOldRevision = errors.New("auth: revision in header is old")
+ ErrInvalidAuthToken = errors.New("auth: invalid auth token")
+ ErrInvalidAuthOpts = errors.New("auth: invalid auth options")
+ ErrInvalidAuthMgmt = errors.New("auth: invalid auth management")
+ ErrInvalidAuthMethod = errors.New("auth: invalid auth signature method")
+ ErrMissingKey = errors.New("auth: missing key data")
+ ErrKeyMismatch = errors.New("auth: public and private keys don't match")
+ ErrVerifyOnly = errors.New("auth: token signing attempted with verify-only key")
+)
+
+const (
+ rootUser = "root"
+ rootRole = "root"
+
+ tokenTypeSimple = "simple"
+ tokenTypeJWT = "jwt"
+
+ revBytesLen = 8
+)
+
+type AuthInfo struct {
+ Username string
+ Revision uint64
+}
+
+// AuthenticateParamIndex is used for a key of context in the parameters of Authenticate()
+type AuthenticateParamIndex struct{}
+
+// AuthenticateParamSimpleTokenPrefix is used for a key of context in the parameters of Authenticate()
+type AuthenticateParamSimpleTokenPrefix struct{}
+
+// saveConsistentIndexFunc is used to sync consistentIndex to backend, now reusing store.saveIndex
+type saveConsistentIndexFunc func(tx backend.BatchTx)
+
+// AuthStore defines auth storage interface.
+type AuthStore interface {
+ // AuthEnable turns on the authentication feature
+ AuthEnable() error
+
+ // AuthDisable turns off the authentication feature
+ AuthDisable()
+
+ // IsAuthEnabled returns true if the authentication feature is enabled.
+ IsAuthEnabled() bool
+
+ // Authenticate does authentication based on given user name and password
+ Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error)
+
+ // Recover recovers the state of auth store from the given backend
+ Recover(b backend.Backend)
+
+ // UserAdd adds a new user
+ UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+
+ // UserDelete deletes a user
+ UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+
+ // UserChangePassword changes a password of a user
+ UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+
+ // UserGrantRole grants a role to the user
+ UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+
+	// UserGet gets the detailed information of a user
+ UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+
+ // UserRevokeRole revokes a role of a user
+ UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+
+ // RoleAdd adds a new role
+ RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+
+ // RoleGrantPermission grants a permission to a role
+ RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+
+ // RoleGet gets the detailed information of a role
+ RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+
+	// RoleRevokePermission revokes a permission from a role
+ RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+
+	// RoleDelete deletes a role
+ RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+
+ // UserList gets a list of all users
+ UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+
+ // RoleList gets a list of all roles
+ RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+
+ // IsPutPermitted checks put permission of the user
+ IsPutPermitted(authInfo *AuthInfo, key []byte) error
+
+ // IsRangePermitted checks range permission of the user
+ IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
+
+ // IsDeleteRangePermitted checks delete-range permission of the user
+ IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
+
+ // IsAdminPermitted checks admin permission of the user
+ IsAdminPermitted(authInfo *AuthInfo) error
+
+	// GenTokenPrefix produces a random string in the case of the simple token;
+	// in the case of JWT, it produces an empty string.
+ GenTokenPrefix() (string, error)
+
+ // Revision gets current revision of authStore
+ Revision() uint64
+
+	// CheckPassword checks that a given pair of username and password is correct
+ CheckPassword(username, password string) (uint64, error)
+
+ // Close does cleanup of AuthStore
+ Close() error
+
+ // AuthInfoFromCtx gets AuthInfo from gRPC's context
+ AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error)
+
+ // AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context
+ AuthInfoFromTLS(ctx context.Context) *AuthInfo
+
+ // WithRoot generates and installs a token that can be used as a root credential
+ WithRoot(ctx context.Context) context.Context
+
+ // HasRole checks that user has role
+ HasRole(user, role string) bool
+
+ // SetConsistentIndexSyncer sets consistentIndex syncer
+ SetConsistentIndexSyncer(syncer saveConsistentIndexFunc)
+}
+
+type TokenProvider interface {
+ info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool)
+ assign(ctx context.Context, username string, revision uint64) (string, error)
+ enable()
+ disable()
+
+ invalidateUser(string)
+ genTokenPrefix() (string, error)
+}
+
+type authStore struct {
+ // atomic operations; need 64-bit align, or 32-bit tests will crash
+ revision uint64
+
+ lg *zap.Logger
+ be backend.Backend
+ enabled bool
+ enabledMu sync.RWMutex
+
+ rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
+
+ tokenProvider TokenProvider
+ syncConsistentIndex saveConsistentIndexFunc
+ bcryptCost int // the algorithm cost / strength for hashing auth passwords
+}
+
+func (as *authStore) SetConsistentIndexSyncer(syncer saveConsistentIndexFunc) {
+ as.syncConsistentIndex = syncer
+}
+func (as *authStore) AuthEnable() error {
+ as.enabledMu.Lock()
+ defer as.enabledMu.Unlock()
+ if as.enabled {
+ if as.lg != nil {
+ as.lg.Info("authentication is already enabled; ignored auth enable request")
+ } else {
+ plog.Noticef("Authentication already enabled")
+ }
+ return nil
+ }
+ b := as.be
+ tx := b.BatchTx()
+ tx.Lock()
+ defer func() {
+ tx.Unlock()
+ b.ForceCommit()
+ }()
+
+ u := getUser(as.lg, tx, rootUser)
+ if u == nil {
+ return ErrRootUserNotExist
+ }
+
+ if !hasRootRole(u) {
+ return ErrRootRoleNotExist
+ }
+
+ tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
+
+ as.enabled = true
+ as.tokenProvider.enable()
+
+ as.rangePermCache = make(map[string]*unifiedRangePermissions)
+
+ as.setRevision(getRevision(tx))
+
+ if as.lg != nil {
+ as.lg.Info("enabled authentication")
+ } else {
+ plog.Noticef("Authentication enabled")
+ }
+ return nil
+}
+
+func (as *authStore) AuthDisable() {
+ as.enabledMu.Lock()
+ defer as.enabledMu.Unlock()
+ if !as.enabled {
+ return
+ }
+ b := as.be
+ tx := b.BatchTx()
+ tx.Lock()
+ tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
+ as.commitRevision(tx)
+ as.saveConsistentIndex(tx)
+ tx.Unlock()
+ b.ForceCommit()
+
+ as.enabled = false
+ as.tokenProvider.disable()
+
+ if as.lg != nil {
+ as.lg.Info("disabled authentication")
+ } else {
+ plog.Noticef("Authentication disabled")
+ }
+}
+
+func (as *authStore) Close() error {
+ as.enabledMu.Lock()
+ defer as.enabledMu.Unlock()
+ if !as.enabled {
+ return nil
+ }
+ as.tokenProvider.disable()
+ return nil
+}
+
+func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
+ if !as.IsAuthEnabled() {
+ return nil, ErrAuthNotEnabled
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, username)
+ if user == nil {
+ return nil, ErrAuthFailed
+ }
+
+ if user.Options != nil && user.Options.NoPassword {
+ return nil, ErrAuthFailed
+ }
+
+ // Password checking is already performed in the API layer, so we don't need to check for now.
+ // Staleness of password can be detected with OCC in the API layer, too.
+
+ token, err := as.tokenProvider.assign(ctx, username, as.Revision())
+ if err != nil {
+ return nil, err
+ }
+
+ if as.lg != nil {
+ as.lg.Debug(
+ "authenticated a user",
+ zap.String("user-name", username),
+ zap.String("token", token),
+ )
+ } else {
+ plog.Debugf("authorized %s, token is %s", username, token)
+ }
+ return &pb.AuthenticateResponse{Token: token}, nil
+}
+
+func (as *authStore) CheckPassword(username, password string) (uint64, error) {
+ if !as.IsAuthEnabled() {
+ return 0, ErrAuthNotEnabled
+ }
+
+ var user *authpb.User
+ // CompareHashAndPassword is very expensive, so we use closures
+ // to avoid putting it in the critical section of the tx lock.
+ revision, err := func() (uint64, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user = getUser(as.lg, tx, username)
+ if user == nil {
+ return 0, ErrAuthFailed
+ }
+
+ if user.Options != nil && user.Options.NoPassword {
+ return 0, ErrAuthFailed
+ }
+
+ return getRevision(tx), nil
+ }()
+ if err != nil {
+ return 0, err
+ }
+
+ if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
+ if as.lg != nil {
+ as.lg.Info("invalid password", zap.String("user-name", username))
+ } else {
+ plog.Noticef("authentication failed, invalid password for user %s", username)
+ }
+ return 0, ErrAuthFailed
+ }
+ return revision, nil
+}
+
+func (as *authStore) Recover(be backend.Backend) {
+ enabled := false
+ as.be = be
+ tx := be.BatchTx()
+ tx.Lock()
+ _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
+ if len(vs) == 1 {
+ if bytes.Equal(vs[0], authEnabled) {
+ enabled = true
+ }
+ }
+
+ as.setRevision(getRevision(tx))
+
+ tx.Unlock()
+
+ as.enabledMu.Lock()
+ as.enabled = enabled
+ as.enabledMu.Unlock()
+}
+
+func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ if len(r.Name) == 0 {
+ return nil, ErrUserEmpty
+ }
+
+ var hashed []byte
+ var err error
+
+ noPassword := r.Options != nil && r.Options.NoPassword
+ if !noPassword {
+ hashed, err = bcrypt.GenerateFromPassword([]byte(r.Password), as.bcryptCost)
+ if err != nil {
+ if as.lg != nil {
+ as.lg.Warn(
+ "failed to bcrypt hash password",
+ zap.String("user-name", r.Name),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("failed to hash password: %s", err)
+ }
+ return nil, err
+ }
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, r.Name)
+ if user != nil {
+ return nil, ErrUserAlreadyExist
+ }
+
+ options := r.Options
+ if options == nil {
+ options = &authpb.UserAddOptions{
+ NoPassword: false,
+ }
+ }
+
+ newUser := &authpb.User{
+ Name: []byte(r.Name),
+ Password: hashed,
+ Options: options,
+ }
+
+ putUser(as.lg, tx, newUser)
+
+ as.commitRevision(tx)
+ as.saveConsistentIndex(tx)
+
+ if as.lg != nil {
+ as.lg.Info("added a user", zap.String("user-name", r.Name))
+ } else {
+ plog.Noticef("added a new user: %s", r.Name)
+ }
+ return &pb.AuthUserAddResponse{}, nil
+}
+
+func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ if as.enabled && r.Name == rootUser {
+ if as.lg != nil {
+ as.lg.Warn("cannot delete 'root' user", zap.String("user-name", r.Name))
+ } else {
+ plog.Errorf("the user root must not be deleted")
+ }
+ return nil, ErrInvalidAuthMgmt
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, r.Name)
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ delUser(tx, r.Name)
+
+ as.commitRevision(tx)
+ as.saveConsistentIndex(tx)
+
+ as.invalidateCachedPerm(r.Name)
+ as.tokenProvider.invalidateUser(r.Name)
+
+ if as.lg != nil {
+ as.lg.Info(
+ "deleted a user",
+ zap.String("user-name", r.Name),
+ zap.Strings("user-roles", user.Roles),
+ )
+ } else {
+ plog.Noticef("deleted a user: %s", r.Name)
+ }
+ return &pb.AuthUserDeleteResponse{}, nil
+}
+
+func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ // TODO(mitake): measure the cost of bcrypt.GenerateFromPassword()
+ // If the cost is too high, we should move the encryption to outside of the raft
+ hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), as.bcryptCost)
+ if err != nil {
+ if as.lg != nil {
+ as.lg.Warn(
+ "failed to bcrypt hash password",
+ zap.String("user-name", r.Name),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("failed to hash password: %s", err)
+ }
+ return nil, err
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, r.Name)
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ updatedUser := &authpb.User{
+ Name: []byte(r.Name),
+ Roles: user.Roles,
+ Password: hashed,
+ Options: user.Options,
+ }
+
+ putUser(as.lg, tx, updatedUser)
+
+ as.commitRevision(tx)
+ as.saveConsistentIndex(tx)
+
+ as.invalidateCachedPerm(r.Name)
+ as.tokenProvider.invalidateUser(r.Name)
+
+ if as.lg != nil {
+ as.lg.Info(
+ "changed a password of a user",
+ zap.String("user-name", r.Name),
+ zap.Strings("user-roles", user.Roles),
+ )
+ } else {
+ plog.Noticef("changed a password of a user: %s", r.Name)
+ }
+ return &pb.AuthUserChangePasswordResponse{}, nil
+}
+
+func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, r.User)
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ if r.Role != rootRole {
+ role := getRole(tx, r.Role)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+ }
+
+ idx := sort.SearchStrings(user.Roles, r.Role)
+ if idx < len(user.Roles) && user.Roles[idx] == r.Role {
+ if as.lg != nil {
+ as.lg.Warn(
+ "ignored grant role request to a user",
+ zap.String("user-name", r.User),
+ zap.Strings("user-roles", user.Roles),
+ zap.String("duplicate-role-name", r.Role),
+ )
+ } else {
+ plog.Warningf("user %s is already granted role %s", r.User, r.Role)
+ }
+ return &pb.AuthUserGrantRoleResponse{}, nil
+ }
+
+ user.Roles = append(user.Roles, r.Role)
+ sort.Strings(user.Roles)
+
+ putUser(as.lg, tx, user)
+
+ as.invalidateCachedPerm(r.User)
+
+ as.commitRevision(tx)
+ as.saveConsistentIndex(tx)
+
+ if as.lg != nil {
+ as.lg.Info(
+ "granted a role to a user",
+ zap.String("user-name", r.User),
+ zap.Strings("user-roles", user.Roles),
+ zap.String("added-role-name", r.Role),
+ )
+ } else {
+ plog.Noticef("granted role %s to user %s", r.Role, r.User)
+ }
+ return &pb.AuthUserGrantRoleResponse{}, nil
+}
+
+func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ user := getUser(as.lg, tx, r.Name)
+ tx.Unlock()
+
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ var resp pb.AuthUserGetResponse
+ resp.Roles = append(resp.Roles, user.Roles...)
+ return &resp, nil
+}
+
+func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ users := getAllUsers(as.lg, tx)
+ tx.Unlock()
+
+ resp := &pb.AuthUserListResponse{Users: make([]string, len(users))}
+ for i := range users {
+ resp.Users[i] = string(users[i].Name)
+ }
+ return resp, nil
+}
+
+func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ if as.enabled && r.Name == rootUser && r.Role == rootRole {
+ if as.lg != nil {
+ as.lg.Warn(
+ "'root' user cannot revoke 'root' role",
+ zap.String("user-name", r.Name),
+ zap.String("role-name", r.Role),
+ )
+ } else {
+ plog.Errorf("the role root must not be revoked from the user root")
+ }
+ return nil, ErrInvalidAuthMgmt
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, r.Name)
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ updatedUser := &authpb.User{
+ Name: user.Name,
+ Password: user.Password,
+ Options: user.Options,
+ }
+
+ for _, role := range user.Roles {
+ if role != r.Role {
+ updatedUser.Roles = append(updatedUser.Roles, role)
+ }
+ }
+
+ if len(updatedUser.Roles) == len(user.Roles) {
+ return nil, ErrRoleNotGranted
+ }
+
+ putUser(as.lg, tx, updatedUser)
+
+ as.invalidateCachedPerm(r.Name)
+
+ as.commitRevision(tx)
+ as.saveConsistentIndex(tx)
+
+ if as.lg != nil {
+ as.lg.Info(
+ "revoked a role from a user",
+ zap.String("user-name", r.Name),
+ zap.Strings("old-user-roles", user.Roles),
+ zap.Strings("new-user-roles", updatedUser.Roles),
+ zap.String("revoked-role-name", r.Role),
+ )
+ } else {
+ plog.Noticef("revoked role %s from user %s", r.Role, r.Name)
+ }
+ return &pb.AuthUserRevokeRoleResponse{}, nil
+}
+
+func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ var resp pb.AuthRoleGetResponse
+
+ role := getRole(tx, r.Role)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+ resp.Perm = append(resp.Perm, role.KeyPermission...)
+ return &resp, nil
+}
+
+func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ roles := getAllRoles(as.lg, tx)
+ tx.Unlock()
+
+ resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))}
+ for i := range roles {
+ resp.Roles[i] = string(roles[i].Name)
+ }
+ return resp, nil
+}
+
+func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ role := getRole(tx, r.Role)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+
+ updatedRole := &authpb.Role{
+ Name: role.Name,
+ }
+
+ for _, perm := range role.KeyPermission {
+ if !bytes.Equal(perm.Key, r.Key) || !bytes.Equal(perm.RangeEnd, r.RangeEnd) {
+ updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm)
+ }
+ }
+
+ if len(role.KeyPermission) == len(updatedRole.KeyPermission) {
+ return nil, ErrPermissionNotGranted
+ }
+
+ putRole(as.lg, tx, updatedRole)
+
+ // TODO(mitake): currently single role update invalidates every cache
+ // It should be optimized.
+ as.clearCachedPerm()
+
+ as.commitRevision(tx)
+ as.saveConsistentIndex(tx)
+
+ if as.lg != nil {
+ as.lg.Info(
+ "revoked a permission on range",
+ zap.String("role-name", r.Role),
+ zap.String("key", string(r.Key)),
+ zap.String("range-end", string(r.RangeEnd)),
+ )
+ } else {
+ plog.Noticef("revoked key %s from role %s", r.Key, r.Role)
+ }
+ return &pb.AuthRoleRevokePermissionResponse{}, nil
+}
+
+func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ if as.enabled && r.Role == rootRole {
+ if as.lg != nil {
+ as.lg.Warn("cannot delete 'root' role", zap.String("role-name", r.Role))
+ } else {
+ plog.Errorf("the role root must not be deleted")
+ }
+ return nil, ErrInvalidAuthMgmt
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ role := getRole(tx, r.Role)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+
+ delRole(tx, r.Role)
+
+ users := getAllUsers(as.lg, tx)
+ for _, user := range users {
+ updatedUser := &authpb.User{
+ Name: user.Name,
+ Password: user.Password,
+ Options: user.Options,
+ }
+
+ for _, role := range user.Roles {
+ if role != r.Role {
+ updatedUser.Roles = append(updatedUser.Roles, role)
+ }
+ }
+
+ if len(updatedUser.Roles) == len(user.Roles) {
+ continue
+ }
+
+ putUser(as.lg, tx, updatedUser)
+
+ as.invalidateCachedPerm(string(user.Name))
+ }
+
+ as.commitRevision(tx)
+ as.saveConsistentIndex(tx)
+
+ if as.lg != nil {
+ as.lg.Info("deleted a role", zap.String("role-name", r.Role))
+ } else {
+ plog.Noticef("deleted role %s", r.Role)
+ }
+ return &pb.AuthRoleDeleteResponse{}, nil
+}
+
+func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ if len(r.Name) == 0 {
+ return nil, ErrRoleEmpty
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ role := getRole(tx, r.Name)
+ if role != nil {
+ return nil, ErrRoleAlreadyExist
+ }
+
+ newRole := &authpb.Role{
+ Name: []byte(r.Name),
+ }
+
+ putRole(as.lg, tx, newRole)
+
+ as.commitRevision(tx)
+ as.saveConsistentIndex(tx)
+
+ if as.lg != nil {
+ as.lg.Info("created a role", zap.String("role-name", r.Name))
+ } else {
+ plog.Noticef("Role %s is created", r.Name)
+ }
+ return &pb.AuthRoleAddResponse{}, nil
+}
+
+func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) {
+ return as.tokenProvider.info(ctx, token, as.Revision())
+}
+
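+// permSlice implements sort.Interface so a role's permissions can be kept
+// sorted by key.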
+type permSlice []*authpb.Permission
+
+func (perms permSlice) Len() int {
+ return len(perms)
+}
+
+func (perms permSlice) Less(i, j int) bool {
+ return bytes.Compare(perms[i].Key, perms[j].Key) < 0
+}
+
+func (perms permSlice) Swap(i, j int) {
+ perms[i], perms[j] = perms[j], perms[i]
+}
+
+func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ role := getRole(tx, r.Name)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+
+ idx := sort.Search(len(role.KeyPermission), func(i int) bool {
+ return bytes.Compare(role.KeyPermission[i].Key, r.Perm.Key) >= 0
+ })
+
+ if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) {
+ // update existing permission
+ role.KeyPermission[idx].PermType = r.Perm.PermType
+ } else {
+ // append new permission to the role
+ newPerm := &authpb.Permission{
+ Key: r.Perm.Key,
+ RangeEnd: r.Perm.RangeEnd,
+ PermType: r.Perm.PermType,
+ }
+
+ role.KeyPermission = append(role.KeyPermission, newPerm)
+ sort.Sort(permSlice(role.KeyPermission))
+ }
+
+ putRole(as.lg, tx, role)
+
+ // TODO(mitake): currently single role update invalidates every cache
+ // It should be optimized.
+ as.clearCachedPerm()
+
+ as.commitRevision(tx)
+ as.saveConsistentIndex(tx)
+
+ if as.lg != nil {
+ as.lg.Info(
+			"granted/updated a permission to a role",
+			zap.String("role-name", r.Name),
+ zap.String("permission-name", authpb.Permission_Type_name[int32(r.Perm.PermType)]),
+ )
+ } else {
+ plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)])
+ }
+ return &pb.AuthRoleGrantPermissionResponse{}, nil
+}
+
+func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error {
+ // TODO(mitake): this function would be costly so we need a caching mechanism
+ if !as.IsAuthEnabled() {
+ return nil
+ }
+
+ // only gets rev == 0 when passed AuthInfo{}; no user given
+ if revision == 0 {
+ return ErrUserEmpty
+ }
+ rev := as.Revision()
+ if revision < rev {
+ if as.lg != nil {
+ as.lg.Warn("request auth revision is less than current node auth revision",
+ zap.Uint64("current node auth revision", rev),
+ zap.Uint64("request auth revision", revision),
+ zap.ByteString("request key", key),
+ zap.Error(ErrAuthOldRevision))
+ } else {
+			plog.Warningf("request auth revision is less than current node auth revision, "+
+				"current node auth revision is %d, "+
+				"request auth revision is %d, "+
+				"request key is %s, "+
+				"err is %v", rev, revision, key, ErrAuthOldRevision)
+ }
+ return ErrAuthOldRevision
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := getUser(as.lg, tx, userName)
+ if user == nil {
+ if as.lg != nil {
+ as.lg.Warn("cannot find a user for permission check", zap.String("user-name", userName))
+ } else {
+ plog.Errorf("invalid user name %s for permission checking", userName)
+ }
+ return ErrPermissionDenied
+ }
+
+ // root role should have permission on all ranges
+ if hasRootRole(user) {
+ return nil
+ }
+
+ if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
+ return nil
+ }
+
+ return ErrPermissionDenied
+}
+
+func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error {
+ return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE)
+}
+
+func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
+ return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ)
+}
+
+func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
+ return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE)
+}
+
+func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
+ if !as.IsAuthEnabled() {
+ return nil
+ }
+ if authInfo == nil {
+ return ErrUserEmpty
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ u := getUser(as.lg, tx, authInfo.Username)
+ tx.Unlock()
+
+ if u == nil {
+ return ErrUserNotFound
+ }
+
+ if !hasRootRole(u) {
+ return ErrPermissionDenied
+ }
+
+ return nil
+}
+
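+// getUser loads and unmarshals the named user from the auth users bucket;
+// it returns nil if the user does not exist.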
+func getUser(lg *zap.Logger, tx backend.BatchTx, username string) *authpb.User {
+ _, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ user := &authpb.User{}
+ err := user.Unmarshal(vs[0])
+ if err != nil {
+ if lg != nil {
+ lg.Panic(
+ "failed to unmarshal 'authpb.User'",
+ zap.String("user-name", username),
+ zap.Error(err),
+ )
+ } else {
+ plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err)
+ }
+ }
+ return user
+}
+
+func getAllUsers(lg *zap.Logger, tx backend.BatchTx) []*authpb.User {
+ _, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ users := make([]*authpb.User, len(vs))
+ for i := range vs {
+ user := &authpb.User{}
+ err := user.Unmarshal(vs[i])
+ if err != nil {
+ if lg != nil {
+ lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err))
+ } else {
+ plog.Panicf("failed to unmarshal user struct: %s", err)
+ }
+ }
+ users[i] = user
+ }
+ return users
+}
+
+func putUser(lg *zap.Logger, tx backend.BatchTx, user *authpb.User) {
+ b, err := user.Marshal()
+ if err != nil {
+ if lg != nil {
+			lg.Panic("failed to marshal 'authpb.User'", zap.Error(err))
+ } else {
+ plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err)
+ }
+ }
+ tx.UnsafePut(authUsersBucketName, user.Name, b)
+}
+
+func delUser(tx backend.BatchTx, username string) {
+ tx.UnsafeDelete(authUsersBucketName, []byte(username))
+}
+
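+// getRole loads and unmarshals the named role from the auth roles bucket;
+// it returns nil if the role does not exist.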
+func getRole(tx backend.BatchTx, rolename string) *authpb.Role {
+ _, vs := tx.UnsafeRange(authRolesBucketName, []byte(rolename), nil, 0)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ role := &authpb.Role{}
+ err := role.Unmarshal(vs[0])
+ if err != nil {
+ plog.Panicf("failed to unmarshal role struct (name: %s): %s", rolename, err)
+ }
+ return role
+}
+
+func getAllRoles(lg *zap.Logger, tx backend.BatchTx) []*authpb.Role {
+ _, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ roles := make([]*authpb.Role, len(vs))
+ for i := range vs {
+ role := &authpb.Role{}
+ err := role.Unmarshal(vs[i])
+ if err != nil {
+ if lg != nil {
+ lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err))
+ } else {
+ plog.Panicf("failed to unmarshal role struct: %s", err)
+ }
+ }
+ roles[i] = role
+ }
+ return roles
+}
+
+func putRole(lg *zap.Logger, tx backend.BatchTx, role *authpb.Role) {
+ b, err := role.Marshal()
+ if err != nil {
+ if lg != nil {
+ lg.Panic(
+ "failed to marshal 'authpb.Role'",
+ zap.String("role-name", string(role.Name)),
+ zap.Error(err),
+ )
+ } else {
+ plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err)
+ }
+ }
+
+ tx.UnsafePut(authRolesBucketName, role.Name, b)
+}
+
+func delRole(tx backend.BatchTx, rolename string) {
+ tx.UnsafeDelete(authRolesBucketName, []byte(rolename))
+}
+
+func (as *authStore) IsAuthEnabled() bool {
+ as.enabledMu.RLock()
+ defer as.enabledMu.RUnlock()
+ return as.enabled
+}
+
+// NewAuthStore creates a new AuthStore.
+func NewAuthStore(lg *zap.Logger, be backend.Backend, tp TokenProvider, bcryptCost int) *authStore {
+ if bcryptCost < bcrypt.MinCost || bcryptCost > bcrypt.MaxCost {
+ if lg != nil {
+ lg.Warn(
+ "use default bcrypt cost instead of the invalid given cost",
+ zap.Int("min-cost", bcrypt.MinCost),
+ zap.Int("max-cost", bcrypt.MaxCost),
+ zap.Int("default-cost", bcrypt.DefaultCost),
+ zap.Int("given-cost", bcryptCost))
+ } else {
+ plog.Warningf("Use default bcrypt-cost %d instead of the invalid value %d",
+ bcrypt.DefaultCost, bcryptCost)
+ }
+
+ bcryptCost = bcrypt.DefaultCost
+ }
+
+ tx := be.BatchTx()
+ tx.Lock()
+
+ tx.UnsafeCreateBucket(authBucketName)
+ tx.UnsafeCreateBucket(authUsersBucketName)
+ tx.UnsafeCreateBucket(authRolesBucketName)
+
+ enabled := false
+ _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
+ if len(vs) == 1 {
+ if bytes.Equal(vs[0], authEnabled) {
+ enabled = true
+ }
+ }
+
+ as := &authStore{
+ revision: getRevision(tx),
+ lg: lg,
+ be: be,
+ enabled: enabled,
+ rangePermCache: make(map[string]*unifiedRangePermissions),
+ tokenProvider: tp,
+ bcryptCost: bcryptCost,
+ }
+
+ if enabled {
+ as.tokenProvider.enable()
+ }
+
+ if as.Revision() == 0 {
+ as.commitRevision(tx)
+ }
+
+ as.setupMetricsReporter()
+
+ tx.Unlock()
+ be.ForceCommit()
+
+ return as
+}
+
+func hasRootRole(u *authpb.User) bool {
+ // u.Roles is sorted in UserGrantRole(), so we can use binary search.
+ idx := sort.SearchStrings(u.Roles, rootRole)
+ return idx != len(u.Roles) && u.Roles[idx] == rootRole
+}
+
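+// commitRevision increments the in-memory auth revision and persists the new
+// value to the backend.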
+func (as *authStore) commitRevision(tx backend.BatchTx) {
+ atomic.AddUint64(&as.revision, 1)
+ revBytes := make([]byte, revBytesLen)
+ binary.BigEndian.PutUint64(revBytes, as.Revision())
+ tx.UnsafePut(authBucketName, revisionKey, revBytes)
+}
+
+func getRevision(tx backend.BatchTx) uint64 {
+ _, vs := tx.UnsafeRange(authBucketName, revisionKey, nil, 0)
+ if len(vs) != 1 {
+ // this can happen in the initialization phase
+ return 0
+ }
+ return binary.BigEndian.Uint64(vs[0])
+}
+
+func (as *authStore) setRevision(rev uint64) {
+ atomic.StoreUint64(&as.revision, rev)
+}
+
+func (as *authStore) Revision() uint64 {
+ return atomic.LoadUint64(&as.revision)
+}
+
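+// AuthInfoFromTLS extracts the user name from the CommonName of the peer's
+// verified TLS certificate, ignoring requests proxied through the gRPC gateway.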
+func (as *authStore) AuthInfoFromTLS(ctx context.Context) (ai *AuthInfo) {
+ peer, ok := peer.FromContext(ctx)
+ if !ok || peer == nil || peer.AuthInfo == nil {
+ return nil
+ }
+
+ tlsInfo := peer.AuthInfo.(credentials.TLSInfo)
+ for _, chains := range tlsInfo.State.VerifiedChains {
+ if len(chains) < 1 {
+ continue
+ }
+ ai = &AuthInfo{
+ Username: chains[0].Subject.CommonName,
+ Revision: as.Revision(),
+ }
+ md, ok := metadata.FromIncomingContext(ctx)
+ if !ok {
+ return nil
+ }
+
+		// A gRPC-gateway proxy request to the etcd server includes the Grpcgateway-Accept
+		// header. The proxy uses the etcd client server certificate. If that certificate
+		// has a CommonName, we should never use it for authentication.
+ if gw := md["grpcgateway-accept"]; len(gw) > 0 {
+ if as.lg != nil {
+ as.lg.Warn(
+ "ignoring common name in gRPC-gateway proxy request",
+ zap.String("common-name", ai.Username),
+ zap.String("user-name", ai.Username),
+ zap.Uint64("revision", ai.Revision),
+ )
+ } else {
+ plog.Warningf("ignoring common name in gRPC-gateway proxy request %s", ai.Username)
+ }
+ return nil
+ }
+ if as.lg != nil {
+ as.lg.Debug(
+				"found common name",
+ zap.String("common-name", ai.Username),
+ zap.String("user-name", ai.Username),
+ zap.Uint64("revision", ai.Revision),
+ )
+ } else {
+ plog.Debugf("found common name %s", ai.Username)
+ }
+ break
+ }
+ return ai
+}
+
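+// AuthInfoFromCtx resolves the auth token carried in the incoming gRPC
+// metadata to an AuthInfo; it returns nil without error if no token is attached.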
+func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
+ md, ok := metadata.FromIncomingContext(ctx)
+ if !ok {
+ return nil, nil
+ }
+
+	// TODO(mitake|hexfusion): review unifying key names
+ ts, ok := md[rpctypes.TokenFieldNameGRPC]
+ if !ok {
+ ts, ok = md[rpctypes.TokenFieldNameSwagger]
+ }
+ if !ok {
+ return nil, nil
+ }
+
+ token := ts[0]
+ authInfo, uok := as.authInfoFromToken(ctx, token)
+ if !uok {
+ if as.lg != nil {
+ as.lg.Warn("invalid auth token", zap.String("token", token))
+ } else {
+ plog.Warningf("invalid auth token: %s", token)
+ }
+ return nil, ErrInvalidAuthToken
+ }
+
+ return authInfo, nil
+}
+
+func (as *authStore) GenTokenPrefix() (string, error) {
+ return as.tokenProvider.genTokenPrefix()
+}
+
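+// decomposeOpts splits a token option string of the form "type,k1=v1,k2=v2"
+// into the token type and a map of type-specific options.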
+func decomposeOpts(lg *zap.Logger, optstr string) (string, map[string]string, error) {
+ opts := strings.Split(optstr, ",")
+ tokenType := opts[0]
+
+ typeSpecificOpts := make(map[string]string)
+ for i := 1; i < len(opts); i++ {
+ pair := strings.Split(opts[i], "=")
+
+ if len(pair) != 2 {
+ if lg != nil {
+ lg.Warn("invalid token option", zap.String("option", optstr))
+ } else {
+ plog.Errorf("invalid token specific option: %s", optstr)
+ }
+ return "", nil, ErrInvalidAuthOpts
+ }
+
+ if _, ok := typeSpecificOpts[pair[0]]; ok {
+ if lg != nil {
+ lg.Warn(
+ "invalid token option",
+ zap.String("option", optstr),
+ zap.String("duplicate-parameter", pair[0]),
+ )
+ } else {
+ plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr)
+ }
+ return "", nil, ErrInvalidAuthOpts
+ }
+
+ typeSpecificOpts[pair[0]] = pair[1]
+ }
+
+ return tokenType, typeSpecificOpts, nil
+
+}
+
+// NewTokenProvider creates a new token provider.
+func NewTokenProvider(
+ lg *zap.Logger,
+ tokenOpts string,
+ indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) {
+ tokenType, typeSpecificOpts, err := decomposeOpts(lg, tokenOpts)
+ if err != nil {
+ return nil, ErrInvalidAuthOpts
+ }
+
+ switch tokenType {
+ case tokenTypeSimple:
+ if lg != nil {
+ lg.Warn("simple token is not cryptographically signed")
+ } else {
+ plog.Warningf("simple token is not cryptographically signed")
+ }
+ return newTokenProviderSimple(lg, indexWaiter), nil
+
+ case tokenTypeJWT:
+ return newTokenProviderJWT(lg, typeSpecificOpts)
+
+ case "":
+ return newTokenProviderNop()
+
+ default:
+ if lg != nil {
+ lg.Warn(
+ "unknown token type",
+ zap.String("type", tokenType),
+ zap.Error(ErrInvalidAuthOpts),
+ )
+ } else {
+ plog.Errorf("unknown token type: %s", tokenType)
+ }
+ return nil, ErrInvalidAuthOpts
+ }
+}
+
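+// WithRoot returns a context carrying a token for the root user so that
+// internal operations (e.g. lease revocation) pass permission checks.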
+func (as *authStore) WithRoot(ctx context.Context) context.Context {
+ if !as.IsAuthEnabled() {
+ return ctx
+ }
+
+ var ctxForAssign context.Context
+ if ts, ok := as.tokenProvider.(*tokenSimple); ok && ts != nil {
+ ctx1 := context.WithValue(ctx, AuthenticateParamIndex{}, uint64(0))
+ prefix, err := ts.genTokenPrefix()
+ if err != nil {
+ if as.lg != nil {
+ as.lg.Warn(
+ "failed to generate prefix of internally used token",
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("failed to generate prefix of internally used token")
+ }
+ return ctx
+ }
+ ctxForAssign = context.WithValue(ctx1, AuthenticateParamSimpleTokenPrefix{}, prefix)
+ } else {
+ ctxForAssign = ctx
+ }
+
+ token, err := as.tokenProvider.assign(ctxForAssign, "root", as.Revision())
+ if err != nil {
+ // this must not happen
+ if as.lg != nil {
+ as.lg.Warn(
+ "failed to assign token for lease revoking",
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("failed to assign token for lease revoking: %s", err)
+ }
+ return ctx
+ }
+
+ mdMap := map[string]string{
+ rpctypes.TokenFieldNameGRPC: token,
+ }
+ tokenMD := metadata.New(mdMap)
+
+ // use "mdIncomingKey{}" since it's called from local etcdserver
+ return metadata.NewIncomingContext(ctx, tokenMD)
+}
+
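+// HasRole reports whether the given user exists and has been granted the
+// given role.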
+func (as *authStore) HasRole(user, role string) bool {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ u := getUser(as.lg, tx, user)
+ tx.Unlock()
+
+ if u == nil {
+ if as.lg != nil {
+ as.lg.Warn(
+ "'has-role' requested for non-existing user",
+ zap.String("user-name", user),
+ zap.String("role-name", role),
+ )
+ } else {
+ plog.Warningf("tried to check user %s has role %s, but user %s doesn't exist", user, role, user)
+ }
+ return false
+ }
+
+ for _, r := range u.Roles {
+ if role == r {
+ return true
+ }
+ }
+ return false
+}
+
+func (as *authStore) BcryptCost() int {
+ return as.bcryptCost
+}
+
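+// saveConsistentIndex persists the consistent index through the injected
+// syncConsistentIndex callback, if one is set.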
+func (as *authStore) saveConsistentIndex(tx backend.BatchTx) {
+ if as.syncConsistentIndex != nil {
+ as.syncConsistentIndex(tx)
+ } else {
+ if as.lg != nil {
+			as.lg.Error("failed to save consistentIndex, syncConsistentIndex is nil")
+ } else {
+			plog.Error("failed to save consistentIndex, syncConsistentIndex is nil")
+ }
+ }
+}
+
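+// setupMetricsReporter registers a callback that reports the current auth
+// revision to the metrics subsystem.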
+func (as *authStore) setupMetricsReporter() {
+ reportCurrentAuthRevMu.Lock()
+ reportCurrentAuthRev = func() float64 {
+ return float64(as.Revision())
+ }
+ reportCurrentAuthRevMu.Unlock()
+}
diff --git a/vendor/go.etcd.io/etcd/client/README.md b/vendor/go.etcd.io/etcd/client/README.md
new file mode 100644
index 000000000000..521d6c012077
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/README.md
@@ -0,0 +1,112 @@
+# etcd/client
+
+etcd/client is the Go client library for etcd.
+
+[![GoDoc](https://godoc.org/go.etcd.io/etcd/client?status.png)](https://godoc.org/go.etcd.io/etcd/client)
+
+For full compatibility, it is recommended to vendor builds using etcd's vendored packages, with a tool such as `golang/dep`, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
+
+## Install
+
+```bash
+go get go.etcd.io/etcd/client
+```
+
+## Usage
+
+```go
+package main
+
+import (
+ "log"
+ "time"
+ "context"
+
+ "go.etcd.io/etcd/client"
+)
+
+func main() {
+ cfg := client.Config{
+ Endpoints: []string{"http://127.0.0.1:2379"},
+ Transport: client.DefaultTransport,
+ // set timeout per request to fail fast when the target endpoint is unavailable
+ HeaderTimeoutPerRequest: time.Second,
+ }
+ c, err := client.New(cfg)
+ if err != nil {
+ log.Fatal(err)
+ }
+ kapi := client.NewKeysAPI(c)
+ // set "/foo" key with "bar" value
+ log.Print("Setting '/foo' key with 'bar' value")
+ resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
+ if err != nil {
+ log.Fatal(err)
+ } else {
+ // print common key info
+ log.Printf("Set is done. Metadata is %q\n", resp)
+ }
+ // get "/foo" key's value
+ log.Print("Getting '/foo' key value")
+ resp, err = kapi.Get(context.Background(), "/foo", nil)
+ if err != nil {
+ log.Fatal(err)
+ } else {
+ // print common key info
+ log.Printf("Get is done. Metadata is %q\n", resp)
+ // print value
+ log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
+ }
+}
+```
+
+## Error Handling
+
+The etcd client may return three types of errors.
+
+- context error
+
+Each API call takes a `context` as its first parameter. A context can be canceled or can carry a deadline. If the context is canceled or its deadline is reached, the corresponding context error will be returned regardless of what internal errors the API call has already encountered.
+
+- cluster error
+
+Each API call tries to send its request to the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or to connection issues, the error is added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
+
+- response error
+
+If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
+
+Here is example code for handling client errors:
+
+```go
+cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
+c, err := client.New(cfg)
+if err != nil {
+ log.Fatal(err)
+}
+
+kapi := client.NewKeysAPI(c)
+resp, err := kapi.Set(ctx, "test", "bar", nil)
+if err != nil {
+ if err == context.Canceled {
+ // ctx is canceled by another routine
+ } else if err == context.DeadlineExceeded {
+		// ctx is attached with a deadline and the deadline was exceeded
+ } else if cerr, ok := err.(*client.ClusterError); ok {
+ // process (cerr.Errors)
+ } else {
+ // bad cluster endpoints, which are not etcd servers
+ }
+}
+```
+
+
+## Caveat
+
+1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well. This saves socket resources, and improves efficiency for both client and server side. This preference doesn't remove consistency from the data consumed by the client because data replicated to each etcd member has already passed through the consensus process.
+
+2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all of the errors it encountered.
+
+3. The default etcd/client currently cannot handle the case where the remote server is SIGSTOPed. The TCP keepalive mechanism doesn't help in this scenario because the operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't a high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.
+
+4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.
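+
+For caveat 4, a quorum read goes through consensus and so cannot return stale data from an isolated member. A minimal sketch, reusing the `kapi` KeysAPI from the usage example above:
+
+```go
+// Quorum forces the read through consensus, so an isolated member
+// cannot serve outdated data for this request.
+resp, err := kapi.Get(context.Background(), "/foo", &client.GetOptions{Quorum: true})
+if err != nil {
+	log.Fatal(err)
+}
+log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
+```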
diff --git a/vendor/go.etcd.io/etcd/client/auth_role.go b/vendor/go.etcd.io/etcd/client/auth_role.go
new file mode 100644
index 000000000000..b6ba7e150dc6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/auth_role.go
@@ -0,0 +1,236 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/url"
+)
+
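+// Role represents a v2 auth role. Grant and Revoke, when set, carry the
+// permission deltas to apply in an update.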
+type Role struct {
+ Role string `json:"role"`
+ Permissions Permissions `json:"permissions"`
+ Grant *Permissions `json:"grant,omitempty"`
+ Revoke *Permissions `json:"revoke,omitempty"`
+}
+
+type Permissions struct {
+ KV rwPermission `json:"kv"`
+}
+
+type rwPermission struct {
+ Read []string `json:"read"`
+ Write []string `json:"write"`
+}
+
+type PermissionType int
+
+const (
+ ReadPermission PermissionType = iota
+ WritePermission
+ ReadWritePermission
+)
+
+// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
+// interact with etcd's role creation and modification features.
+func NewAuthRoleAPI(c Client) AuthRoleAPI {
+ return &httpAuthRoleAPI{
+ client: c,
+ }
+}
+
+type AuthRoleAPI interface {
+ // AddRole adds a role.
+ AddRole(ctx context.Context, role string) error
+
+ // RemoveRole removes a role.
+ RemoveRole(ctx context.Context, role string) error
+
+ // GetRole retrieves role details.
+ GetRole(ctx context.Context, role string) (*Role, error)
+
+ // GrantRoleKV grants a role some permission prefixes for the KV store.
+ GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
+
+ // RevokeRoleKV revokes some permission prefixes for a role on the KV store.
+ RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
+
+ // ListRoles lists roles.
+ ListRoles(ctx context.Context) ([]string, error)
+}
+
+type httpAuthRoleAPI struct {
+ client httpClient
+}
+
+type authRoleAPIAction struct {
+ verb string
+ name string
+ role *Role
+}
+
+type authRoleAPIList struct{}
+
+func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
+ u := v2AuthURL(ep, "roles", "")
+ req, _ := http.NewRequest("GET", u.String(), nil)
+ req.Header.Set("Content-Type", "application/json")
+ return req
+}
+
+func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2AuthURL(ep, "roles", l.name)
+ if l.role == nil {
+ req, _ := http.NewRequest(l.verb, u.String(), nil)
+ return req
+ }
+ b, err := json.Marshal(l.role)
+ if err != nil {
+ panic(err)
+ }
+ body := bytes.NewReader(b)
+ req, _ := http.NewRequest(l.verb, u.String(), body)
+ req.Header.Set("Content-Type", "application/json")
+ return req
+}
+
+func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
+ resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
+ if err != nil {
+ return nil, err
+ }
+ if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+ return nil, err
+ }
+ var roleList struct {
+ Roles []Role `json:"roles"`
+ }
+ if err = json.Unmarshal(body, &roleList); err != nil {
+ return nil, err
+ }
+ ret := make([]string, 0, len(roleList.Roles))
+ for _, r := range roleList.Roles {
+ ret = append(ret, r.Role)
+ }
+ return ret, nil
+}
+
+func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
+ role := &Role{
+ Role: rolename,
+ }
+ return r.addRemoveRole(ctx, &authRoleAPIAction{
+ verb: "PUT",
+ name: rolename,
+ role: role,
+ })
+}
+
+func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
+ return r.addRemoveRole(ctx, &authRoleAPIAction{
+ verb: "DELETE",
+ name: rolename,
+ })
+}
+
+func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
+ resp, body, err := r.client.Do(ctx, req)
+ if err != nil {
+ return err
+ }
+ if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
+ var sec authError
+ err := json.Unmarshal(body, &sec)
+ if err != nil {
+ return err
+ }
+ return sec
+ }
+ return nil
+}
+
+func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
+ return r.modRole(ctx, &authRoleAPIAction{
+ verb: "GET",
+ name: rolename,
+ })
+}
+
+func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
+ var out rwPermission
+ switch permType {
+ case ReadPermission:
+ out.Read = prefixes
+ case WritePermission:
+ out.Write = prefixes
+ case ReadWritePermission:
+ out.Read = prefixes
+ out.Write = prefixes
+ }
+ return out
+}
+
+func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
+ rwp := buildRWPermission(prefixes, permType)
+ role := &Role{
+ Role: rolename,
+ Grant: &Permissions{
+ KV: rwp,
+ },
+ }
+ return r.modRole(ctx, &authRoleAPIAction{
+ verb: "PUT",
+ name: rolename,
+ role: role,
+ })
+}
+
+func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
+ rwp := buildRWPermission(prefixes, permType)
+ role := &Role{
+ Role: rolename,
+ Revoke: &Permissions{
+ KV: rwp,
+ },
+ }
+ return r.modRole(ctx, &authRoleAPIAction{
+ verb: "PUT",
+ name: rolename,
+ role: role,
+ })
+}
+
+func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
+ resp, body, err := r.client.Do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+ if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+ var sec authError
+ err = json.Unmarshal(body, &sec)
+ if err != nil {
+ return nil, err
+ }
+ return nil, sec
+ }
+ var role Role
+ if err = json.Unmarshal(body, &role); err != nil {
+ return nil, err
+ }
+ return &role, nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/auth_user.go b/vendor/go.etcd.io/etcd/client/auth_user.go
new file mode 100644
index 000000000000..8e7e2efe8333
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/auth_user.go
@@ -0,0 +1,319 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/url"
+ "path"
+)
+
+var (
+ defaultV2AuthPrefix = "/v2/auth"
+)
+
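+// User represents a v2 auth user. Grant and Revoke, when set, carry the role
+// deltas to apply in an update.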
+type User struct {
+ User string `json:"user"`
+ Password string `json:"password,omitempty"`
+ Roles []string `json:"roles"`
+ Grant []string `json:"grant,omitempty"`
+ Revoke []string `json:"revoke,omitempty"`
+}
+
+// userListEntry is the user representation given by the server for ListUsers
+type userListEntry struct {
+ User string `json:"user"`
+ Roles []Role `json:"roles"`
+}
+
+type UserRoles struct {
+ User string `json:"user"`
+ Roles []Role `json:"roles"`
+}
+
+func v2AuthURL(ep url.URL, action string, name string) *url.URL {
+ if name != "" {
+ ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
+ return &ep
+ }
+ ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
+ return &ep
+}
+
+// NewAuthAPI constructs a new AuthAPI that uses HTTP to
+// interact with etcd's general auth features.
+func NewAuthAPI(c Client) AuthAPI {
+ return &httpAuthAPI{
+ client: c,
+ }
+}
+
+type AuthAPI interface {
+ // Enable auth.
+ Enable(ctx context.Context) error
+
+ // Disable auth.
+ Disable(ctx context.Context) error
+}
+
+type httpAuthAPI struct {
+ client httpClient
+}
+
+func (s *httpAuthAPI) Enable(ctx context.Context) error {
+ return s.enableDisable(ctx, &authAPIAction{"PUT"})
+}
+
+func (s *httpAuthAPI) Disable(ctx context.Context) error {
+ return s.enableDisable(ctx, &authAPIAction{"DELETE"})
+}
+
+func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
+ resp, body, err := s.client.Do(ctx, req)
+ if err != nil {
+ return err
+ }
+ if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
+ var sec authError
+ err = json.Unmarshal(body, &sec)
+ if err != nil {
+ return err
+ }
+ return sec
+ }
+ return nil
+}
+
+type authAPIAction struct {
+ verb string
+}
+
+func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2AuthURL(ep, "enable", "")
+ req, _ := http.NewRequest(l.verb, u.String(), nil)
+ return req
+}
+
+type authError struct {
+ Message string `json:"message"`
+ Code int `json:"-"`
+}
+
+func (e authError) Error() string {
+ return e.Message
+}
+
+// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
+// interact with etcd's user creation and modification features.
+func NewAuthUserAPI(c Client) AuthUserAPI {
+ return &httpAuthUserAPI{
+ client: c,
+ }
+}
+
+type AuthUserAPI interface {
+ // AddUser adds a user.
+ AddUser(ctx context.Context, username string, password string) error
+
+ // RemoveUser removes a user.
+ RemoveUser(ctx context.Context, username string) error
+
+ // GetUser retrieves user details.
+ GetUser(ctx context.Context, username string) (*User, error)
+
+ // GrantUser grants a user some permission roles.
+ GrantUser(ctx context.Context, username string, roles []string) (*User, error)
+
+ // RevokeUser revokes some permission roles from a user.
+ RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
+
+ // ChangePassword changes the user's password.
+ ChangePassword(ctx context.Context, username string, password string) (*User, error)
+
+ // ListUsers lists the users.
+ ListUsers(ctx context.Context) ([]string, error)
+}
+
+type httpAuthUserAPI struct {
+ client httpClient
+}
+
+type authUserAPIAction struct {
+ verb string
+ username string
+ user *User
+}
+
+type authUserAPIList struct{}
+
+func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
+ u := v2AuthURL(ep, "users", "")
+ req, _ := http.NewRequest("GET", u.String(), nil)
+ req.Header.Set("Content-Type", "application/json")
+ return req
+}
+
+func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2AuthURL(ep, "users", l.username)
+ if l.user == nil {
+ req, _ := http.NewRequest(l.verb, u.String(), nil)
+ return req
+ }
+ b, err := json.Marshal(l.user)
+ if err != nil {
+ panic(err)
+ }
+ body := bytes.NewReader(b)
+ req, _ := http.NewRequest(l.verb, u.String(), body)
+ req.Header.Set("Content-Type", "application/json")
+ return req
+}
+
+func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
+ resp, body, err := u.client.Do(ctx, &authUserAPIList{})
+ if err != nil {
+ return nil, err
+ }
+ if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+ var sec authError
+ err = json.Unmarshal(body, &sec)
+ if err != nil {
+ return nil, err
+ }
+ return nil, sec
+ }
+
+ var userList struct {
+ Users []userListEntry `json:"users"`
+ }
+
+ if err = json.Unmarshal(body, &userList); err != nil {
+ return nil, err
+ }
+
+ ret := make([]string, 0, len(userList.Users))
+ for _, u := range userList.Users {
+ ret = append(ret, u.User)
+ }
+ return ret, nil
+}
+
+func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
+ user := &User{
+ User: username,
+ Password: password,
+ }
+ return u.addRemoveUser(ctx, &authUserAPIAction{
+ verb: "PUT",
+ username: username,
+ user: user,
+ })
+}
+
+func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
+ return u.addRemoveUser(ctx, &authUserAPIAction{
+ verb: "DELETE",
+ username: username,
+ })
+}
+
+func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error {
+ resp, body, err := u.client.Do(ctx, req)
+ if err != nil {
+ return err
+ }
+ if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
+ var sec authError
+ err = json.Unmarshal(body, &sec)
+ if err != nil {
+ return err
+ }
+ return sec
+ }
+ return nil
+}
+
+func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
+ return u.modUser(ctx, &authUserAPIAction{
+ verb: "GET",
+ username: username,
+ })
+}
+
+func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
+ user := &User{
+ User: username,
+ Grant: roles,
+ }
+ return u.modUser(ctx, &authUserAPIAction{
+ verb: "PUT",
+ username: username,
+ user: user,
+ })
+}
+
+func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
+ user := &User{
+ User: username,
+ Revoke: roles,
+ }
+ return u.modUser(ctx, &authUserAPIAction{
+ verb: "PUT",
+ username: username,
+ user: user,
+ })
+}
+
+func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
+ user := &User{
+ User: username,
+ Password: password,
+ }
+ return u.modUser(ctx, &authUserAPIAction{
+ verb: "PUT",
+ username: username,
+ user: user,
+ })
+}
+
+func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) {
+ resp, body, err := u.client.Do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+ if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+ var sec authError
+ err = json.Unmarshal(body, &sec)
+ if err != nil {
+ return nil, err
+ }
+ return nil, sec
+ }
+ var user User
+ if err = json.Unmarshal(body, &user); err != nil {
+ var userR UserRoles
+ if urerr := json.Unmarshal(body, &userR); urerr != nil {
+ return nil, err
+ }
+ user.User = userR.User
+ for _, r := range userR.Roles {
+ user.Roles = append(user.Roles, r.Role)
+ }
+ }
+ return &user, nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/cancelreq.go b/vendor/go.etcd.io/etcd/client/cancelreq.go
new file mode 100644
index 000000000000..76d1f040198b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/cancelreq.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// borrowed from golang/net/context/ctxhttp/cancelreq.go
+
+package client
+
+import "net/http"
+
+func requestCanceler(tr CancelableTransport, req *http.Request) func() {
+ ch := make(chan struct{})
+ req.Cancel = ch
+
+ return func() {
+ close(ch)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/client/client.go b/vendor/go.etcd.io/etcd/client/client.go
new file mode 100644
index 000000000000..de9ab798e487
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/client.go
@@ -0,0 +1,710 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/version"
+)
+
+var (
+ ErrNoEndpoints = errors.New("client: no endpoints available")
+ ErrTooManyRedirects = errors.New("client: too many redirects")
+ ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
+ ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
+ errTooManyRedirectChecks = errors.New("client: too many redirect checks")
+
+	// oneShotCtxValue is set on a context using WithValue(&oneShotCtxValue) so
+	// that Do() will not retry a request.
+ oneShotCtxValue interface{}
+)
+
+var DefaultRequestTimeout = 5 * time.Second
+
+var DefaultTransport CancelableTransport = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+}
+
+type EndpointSelectionMode int
+
+const (
+ // EndpointSelectionRandom is the default value of the 'SelectionMode'.
+ // As the name implies, the client object will pick a node from the members
+ // of the cluster in a random fashion. If the cluster has three members, A, B,
+ // and C, the client picks any node from its three members as its request
+ // destination.
+ EndpointSelectionRandom EndpointSelectionMode = iota
+
+ // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
+ // requests are sent directly to the cluster leader. This reduces
+ // forwarding roundtrips compared to making requests to etcd followers
+ // who then forward them to the cluster leader. In the event of a leader
+ // failure, however, clients configured this way cannot prioritize among
+ // the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
+ // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
+ // maintain its knowledge of current cluster state.
+ //
+ // This mode should be used with Client.AutoSync().
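+	//
+	// A minimal configuration sketch (endpoint URL is illustrative):
+	//
+	//	cfg := client.Config{
+	//		Endpoints:     []string{"http://node1.example.com:2379"},
+	//		SelectionMode: client.EndpointSelectionPrioritizeLeader,
+	//	}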
+ EndpointSelectionPrioritizeLeader
+)
+
+type Config struct {
+ // Endpoints defines a set of URLs (schemes, hosts and ports only)
+ // that can be used to communicate with a logical etcd cluster. For
+ // example, a three-node cluster could be provided like so:
+ //
+ // Endpoints: []string{
+ // "http://node1.example.com:2379",
+ // "http://node2.example.com:2379",
+ // "http://node3.example.com:2379",
+ // }
+ //
+ // If multiple endpoints are provided, the Client will attempt to
+ // use them all in the event that one or more of them are unusable.
+ //
+ // If Client.Sync is ever called, the Client may cache an alternate
+ // set of endpoints to continue operation.
+ Endpoints []string
+
+ // Transport is used by the Client to drive HTTP requests. If not
+ // provided, DefaultTransport will be used.
+ Transport CancelableTransport
+
+ // CheckRedirect specifies the policy for handling HTTP redirects.
+ // If CheckRedirect is not nil, the Client calls it before
+ // following an HTTP redirect. The sole argument is the number of
+ // requests that have already been made. If CheckRedirect returns
+	// an error, Client.Do will not make any further requests and will
+	// return the error back to the caller.
+ //
+ // If CheckRedirect is nil, the Client uses its default policy,
+ // which is to stop after 10 consecutive requests.
+ CheckRedirect CheckRedirectFunc
+
+	// Username specifies the user credential to add as an authorization header.
+ Username string
+
+ // Password is the password for the specified user to add as an authorization header
+ // to the request.
+ Password string
+
+ // HeaderTimeoutPerRequest specifies the time limit to wait for response
+ // header in a single request made by the Client. The timeout includes
+ // connection time, any redirects, and header wait time.
+ //
+	// For a non-watch GET request, the server returns the response body immediately.
+	// For a PUT/POST/DELETE request, the server will attempt to commit the request
+	// before responding, which is expected to take `100ms + 2 * RTT`.
+	// For a watch request, the server returns the header immediately to notify the
+	// Client that the watch has started. But if the server is behind some kind of
+	// proxy, the response header may be cached at the proxy, and the Client cannot
+	// rely on this behavior.
+	//
+	// In particular, a wait request will ignore this timeout.
+ //
+ // One API call may send multiple requests to different etcd servers until it
+	// succeeds. Use the context of the API call to specify the overall timeout.
+ //
+ // A HeaderTimeoutPerRequest of zero means no timeout.
+ HeaderTimeoutPerRequest time.Duration
+
+ // SelectionMode is an EndpointSelectionMode enum that specifies the
+ // policy for choosing the etcd cluster node to which requests are sent.
+ SelectionMode EndpointSelectionMode
+}
+
+func (cfg *Config) transport() CancelableTransport {
+ if cfg.Transport == nil {
+ return DefaultTransport
+ }
+ return cfg.Transport
+}
+
+func (cfg *Config) checkRedirect() CheckRedirectFunc {
+ if cfg.CheckRedirect == nil {
+ return DefaultCheckRedirect
+ }
+ return cfg.CheckRedirect
+}
+
+// CancelableTransport mimics net/http.Transport, but requires that
+// the object also support request cancellation.
+type CancelableTransport interface {
+ http.RoundTripper
+ CancelRequest(req *http.Request)
+}
+
+type CheckRedirectFunc func(via int) error
+
+// DefaultCheckRedirect follows up to 10 redirects, but no more.
+var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
+ if via > 10 {
+ return ErrTooManyRedirects
+ }
+ return nil
+}
+
+type Client interface {
+ // Sync updates the internal cache of the etcd cluster's membership.
+ Sync(context.Context) error
+
+ // AutoSync periodically calls Sync() every given interval.
+	// The recommended sync interval is 10 seconds to 1 minute; this does
+	// not put too much overhead on the server and lets the client catch
+	// up with cluster changes in time.
+ //
+	// Example usage:
+ //
+ // for {
+ // err := client.AutoSync(ctx, 10*time.Second)
+ // if err == context.DeadlineExceeded || err == context.Canceled {
+ // break
+ // }
+ // log.Print(err)
+ // }
+ AutoSync(context.Context, time.Duration) error
+
+ // Endpoints returns a copy of the current set of API endpoints used
+ // by Client to resolve HTTP requests. If Sync has ever been called,
+ // this may differ from the initial Endpoints provided in the Config.
+ Endpoints() []string
+
+ // SetEndpoints sets the set of API endpoints used by Client to resolve
+ // HTTP requests. If the given endpoints are not valid, an error will be
+ // returned
+	// returned.
+
+ // GetVersion retrieves the current etcd server and cluster version
+ GetVersion(ctx context.Context) (*version.Versions, error)
+
+ httpClient
+}
+
+func New(cfg Config) (Client, error) {
+ c := &httpClusterClient{
+ clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
+ rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+ selectionMode: cfg.SelectionMode,
+ }
+ if cfg.Username != "" {
+ c.credentials = &credentials{
+ username: cfg.Username,
+ password: cfg.Password,
+ }
+ }
+ if err := c.SetEndpoints(cfg.Endpoints); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+type httpClient interface {
+ Do(context.Context, httpAction) (*http.Response, []byte, error)
+}
+
+func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
+ return func(ep url.URL) httpClient {
+ return &redirectFollowingHTTPClient{
+ checkRedirect: cr,
+ client: &simpleHTTPClient{
+ transport: tr,
+ endpoint: ep,
+ headerTimeout: headerTimeout,
+ },
+ }
+ }
+}
+
+type credentials struct {
+ username string
+ password string
+}
+
+type httpClientFactory func(url.URL) httpClient
+
+type httpAction interface {
+ HTTPRequest(url.URL) *http.Request
+}
+
+type httpClusterClient struct {
+ clientFactory httpClientFactory
+ endpoints []url.URL
+ pinned int
+ credentials *credentials
+ sync.RWMutex
+ rand *rand.Rand
+ selectionMode EndpointSelectionMode
+}
+
+func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
+ ceps := make([]url.URL, len(eps))
+ copy(ceps, eps)
+
+ // To perform a lookup on the new endpoint list without using the current
+	// client, we'll copy it.
+ clientCopy := &httpClusterClient{
+ clientFactory: c.clientFactory,
+ credentials: c.credentials,
+ rand: c.rand,
+
+ pinned: 0,
+ endpoints: ceps,
+ }
+
+ mAPI := NewMembersAPI(clientCopy)
+ leader, err := mAPI.Leader(ctx)
+ if err != nil {
+ return "", err
+ }
+ if len(leader.ClientURLs) == 0 {
+ return "", ErrNoLeaderEndpoint
+ }
+
+ return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
+}
+
+func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
+ if len(eps) == 0 {
+ return []url.URL{}, ErrNoEndpoints
+ }
+
+ neps := make([]url.URL, len(eps))
+ for i, ep := range eps {
+ u, err := url.Parse(ep)
+ if err != nil {
+ return []url.URL{}, err
+ }
+ neps[i] = *u
+ }
+ return neps, nil
+}
+
+func (c *httpClusterClient) SetEndpoints(eps []string) error {
+ neps, err := c.parseEndpoints(eps)
+ if err != nil {
+ return err
+ }
+
+ c.Lock()
+ defer c.Unlock()
+
+ c.endpoints = shuffleEndpoints(c.rand, neps)
+	// We don't handle PrioritizeLeader here: without a context we cannot
+	// call getLeaderEndpoint. Callers using PrioritizeLeader are expected
+	// to call Sync regularly, where a context is available and the leader
+	// can be determined. PrioritizeLeader is a loose guarantee in any case.
+ c.pinned = 0
+
+ return nil
+}
+
+func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+ action := act
+ c.RLock()
+ leps := len(c.endpoints)
+ eps := make([]url.URL, leps)
+ n := copy(eps, c.endpoints)
+ pinned := c.pinned
+
+ if c.credentials != nil {
+ action = &authedAction{
+ act: act,
+ credentials: *c.credentials,
+ }
+ }
+ c.RUnlock()
+
+ if leps == 0 {
+ return nil, nil, ErrNoEndpoints
+ }
+
+ if leps != n {
+ return nil, nil, errors.New("unable to pick endpoint: copy failed")
+ }
+
+ var resp *http.Response
+ var body []byte
+ var err error
+ cerr := &ClusterError{}
+ isOneShot := ctx.Value(&oneShotCtxValue) != nil
+
+ for i := pinned; i < leps+pinned; i++ {
+ k := i % leps
+ hc := c.clientFactory(eps[k])
+ resp, body, err = hc.Do(ctx, action)
+ if err != nil {
+ cerr.Errors = append(cerr.Errors, err)
+ if err == ctx.Err() {
+ return nil, nil, ctx.Err()
+ }
+ if err == context.Canceled || err == context.DeadlineExceeded {
+ return nil, nil, err
+ }
+ } else if resp.StatusCode/100 == 5 {
+ switch resp.StatusCode {
+ case http.StatusInternalServerError, http.StatusServiceUnavailable:
+ // TODO: make sure this is a no leader response
+ cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
+ default:
+ cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
+ }
+ err = cerr.Errors[0]
+ }
+ if err != nil {
+ if !isOneShot {
+ continue
+ }
+ c.Lock()
+ c.pinned = (k + 1) % leps
+ c.Unlock()
+ return nil, nil, err
+ }
+ if k != pinned {
+ c.Lock()
+ c.pinned = k
+ c.Unlock()
+ }
+ return resp, body, nil
+ }
+
+ return nil, nil, cerr
+}
+
+func (c *httpClusterClient) Endpoints() []string {
+ c.RLock()
+ defer c.RUnlock()
+
+ eps := make([]string, len(c.endpoints))
+ for i, ep := range c.endpoints {
+ eps[i] = ep.String()
+ }
+
+ return eps
+}
+
+func (c *httpClusterClient) Sync(ctx context.Context) error {
+ mAPI := NewMembersAPI(c)
+ ms, err := mAPI.List(ctx)
+ if err != nil {
+ return err
+ }
+
+ var eps []string
+ for _, m := range ms {
+ eps = append(eps, m.ClientURLs...)
+ }
+
+ neps, err := c.parseEndpoints(eps)
+ if err != nil {
+ return err
+ }
+
+ npin := 0
+
+ switch c.selectionMode {
+ case EndpointSelectionRandom:
+ c.RLock()
+ eq := endpointsEqual(c.endpoints, neps)
+ c.RUnlock()
+
+ if eq {
+ return nil
+ }
+		// When the endpoint list changes, we choose a new pin.
+ neps = shuffleEndpoints(c.rand, neps)
+ case EndpointSelectionPrioritizeLeader:
+ nle, err := c.getLeaderEndpoint(ctx, neps)
+ if err != nil {
+ return ErrNoLeaderEndpoint
+ }
+
+ for i, n := range neps {
+ if n.String() == nle {
+ npin = i
+ break
+ }
+ }
+ default:
+ return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
+ }
+
+ c.Lock()
+ defer c.Unlock()
+ c.endpoints = neps
+ c.pinned = npin
+
+ return nil
+}
+
+func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+ for {
+ err := c.Sync(ctx)
+ if err != nil {
+ return err
+ }
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-ticker.C:
+ }
+ }
+}
+
+func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
+ act := &getAction{Prefix: "/version"}
+
+ resp, body, err := c.Do(ctx, act)
+ if err != nil {
+ return nil, err
+ }
+
+ switch resp.StatusCode {
+ case http.StatusOK:
+ if len(body) == 0 {
+ return nil, ErrEmptyBody
+ }
+ var vresp version.Versions
+ if err := json.Unmarshal(body, &vresp); err != nil {
+ return nil, ErrInvalidJSON
+ }
+ return &vresp, nil
+ default:
+ var etcdErr Error
+ if err := json.Unmarshal(body, &etcdErr); err != nil {
+ return nil, ErrInvalidJSON
+ }
+ return nil, etcdErr
+ }
+}
+
+type roundTripResponse struct {
+ resp *http.Response
+ err error
+}
+
+type simpleHTTPClient struct {
+ transport CancelableTransport
+ endpoint url.URL
+ headerTimeout time.Duration
+}
+
+func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+ req := act.HTTPRequest(c.endpoint)
+
+ if err := printcURL(req); err != nil {
+ return nil, nil, err
+ }
+
+ isWait := false
+ if req != nil && req.URL != nil {
+ ws := req.URL.Query().Get("wait")
+ if len(ws) != 0 {
+ var err error
+ isWait, err = strconv.ParseBool(ws)
+ if err != nil {
+ return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
+ }
+ }
+ }
+
+ var hctx context.Context
+ var hcancel context.CancelFunc
+ if !isWait && c.headerTimeout > 0 {
+ hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
+ } else {
+ hctx, hcancel = context.WithCancel(ctx)
+ }
+ defer hcancel()
+
+ reqcancel := requestCanceler(c.transport, req)
+
+ rtchan := make(chan roundTripResponse, 1)
+ go func() {
+ resp, err := c.transport.RoundTrip(req)
+ rtchan <- roundTripResponse{resp: resp, err: err}
+ close(rtchan)
+ }()
+
+ var resp *http.Response
+ var err error
+
+ select {
+ case rtresp := <-rtchan:
+ resp, err = rtresp.resp, rtresp.err
+ case <-hctx.Done():
+ // cancel and wait for request to actually exit before continuing
+ reqcancel()
+ rtresp := <-rtchan
+ resp = rtresp.resp
+ switch {
+ case ctx.Err() != nil:
+ err = ctx.Err()
+ case hctx.Err() != nil:
+ err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
+ default:
+ panic("failed to get error from context")
+ }
+ }
+
+ // always check for resp nil-ness to deal with possible
+ // race conditions between channels above
+ defer func() {
+ if resp != nil {
+ resp.Body.Close()
+ }
+ }()
+
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var body []byte
+ done := make(chan struct{})
+ go func() {
+ body, err = ioutil.ReadAll(resp.Body)
+ done <- struct{}{}
+ }()
+
+ select {
+ case <-ctx.Done():
+ resp.Body.Close()
+ <-done
+ return nil, nil, ctx.Err()
+ case <-done:
+ }
+
+ return resp, body, err
+}
+
+type authedAction struct {
+ act httpAction
+ credentials credentials
+}
+
+func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
+ r := a.act.HTTPRequest(url)
+ r.SetBasicAuth(a.credentials.username, a.credentials.password)
+ return r
+}
+
+type redirectFollowingHTTPClient struct {
+ client httpClient
+ checkRedirect CheckRedirectFunc
+}
+
+func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+ next := act
+ for i := 0; i < 100; i++ {
+ if i > 0 {
+ if err := r.checkRedirect(i); err != nil {
+ return nil, nil, err
+ }
+ }
+ resp, body, err := r.client.Do(ctx, next)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp.StatusCode/100 == 3 {
+ hdr := resp.Header.Get("Location")
+ if hdr == "" {
+ return nil, nil, fmt.Errorf("location header not set")
+ }
+ loc, err := url.Parse(hdr)
+ if err != nil {
+ return nil, nil, fmt.Errorf("location header not valid URL: %s", hdr)
+ }
+ next = &redirectedHTTPAction{
+ action: act,
+ location: *loc,
+ }
+ continue
+ }
+ return resp, body, nil
+ }
+
+ return nil, nil, errTooManyRedirectChecks
+}
+
+type redirectedHTTPAction struct {
+ action httpAction
+ location url.URL
+}
+
+func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
+ orig := r.action.HTTPRequest(ep)
+ orig.URL = &r.location
+ return orig
+}
+
+func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
+	// copied from rand.Rand.Perm in Go <= 1.9
+ n := len(eps)
+ p := make([]int, n)
+ for i := 0; i < n; i++ {
+ j := r.Intn(i + 1)
+ p[i] = p[j]
+ p[j] = i
+ }
+ neps := make([]url.URL, n)
+ for i, k := range p {
+ neps[i] = eps[k]
+ }
+ return neps
+}
+
+func endpointsEqual(left, right []url.URL) bool {
+ if len(left) != len(right) {
+ return false
+ }
+
+ sLeft := make([]string, len(left))
+ sRight := make([]string, len(right))
+ for i, l := range left {
+ sLeft[i] = l.String()
+ }
+ for i, r := range right {
+ sRight[i] = r.String()
+ }
+
+ sort.Strings(sLeft)
+ sort.Strings(sRight)
+ for i := range sLeft {
+ if sLeft[i] != sRight[i] {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/go.etcd.io/etcd/client/cluster_error.go b/vendor/go.etcd.io/etcd/client/cluster_error.go
new file mode 100644
index 000000000000..34618cdbd9e6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/cluster_error.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import "fmt"
+
+type ClusterError struct {
+ Errors []error
+}
+
+func (ce *ClusterError) Error() string {
+ s := ErrClusterUnavailable.Error()
+ for i, e := range ce.Errors {
+ s += fmt.Sprintf("; error #%d: %s\n", i, e)
+ }
+ return s
+}
+
+func (ce *ClusterError) Detail() string {
+ s := ""
+ for i, e := range ce.Errors {
+ s += fmt.Sprintf("error #%d: %s\n", i, e)
+ }
+ return s
+}
diff --git a/vendor/go.etcd.io/etcd/client/curl.go b/vendor/go.etcd.io/etcd/client/curl.go
new file mode 100644
index 000000000000..c8bc9fba20ec
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/curl.go
@@ -0,0 +1,70 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+)
+
+var (
+ cURLDebug = false
+)
+
+func EnablecURLDebug() {
+ cURLDebug = true
+}
+
+func DisablecURLDebug() {
+ cURLDebug = false
+}
+
+// printcURL prints the cURL equivalent request to stderr.
+// It returns an error if the body of the request cannot
+// be read.
+// The caller MUST cancel the request if there is an error.
+func printcURL(req *http.Request) error {
+ if !cURLDebug {
+ return nil
+ }
+ var (
+ command string
+ b []byte
+ err error
+ )
+
+ if req.URL != nil {
+ command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
+ }
+
+ if req.Body != nil {
+ b, err = ioutil.ReadAll(req.Body)
+ if err != nil {
+ return err
+ }
+ command += fmt.Sprintf(" -d %q", string(b))
+ }
+
+ fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)
+
+ // reset body
+ body := bytes.NewBuffer(b)
+ req.Body = ioutil.NopCloser(body)
+
+ return nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/discover.go b/vendor/go.etcd.io/etcd/client/discover.go
new file mode 100644
index 000000000000..580c25626c98
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/discover.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "go.etcd.io/etcd/pkg/srv"
+)
+
+// Discoverer is an interface that wraps the Discover method.
+type Discoverer interface {
+ // Discover looks up the etcd servers for the domain.
+ Discover(domain string, serviceName string) ([]string, error)
+}
+
+type srvDiscover struct{}
+
+// NewSRVDiscover constructs a new Discoverer that uses the stdlib to look up SRV records.
+func NewSRVDiscover() Discoverer {
+ return &srvDiscover{}
+}
+
+func (d *srvDiscover) Discover(domain string, serviceName string) ([]string, error) {
+ srvs, err := srv.GetClient("etcd-client", domain, serviceName)
+ if err != nil {
+ return nil, err
+ }
+ return srvs.Endpoints, nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/doc.go b/vendor/go.etcd.io/etcd/client/doc.go
new file mode 100644
index 000000000000..abe5199c319c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/doc.go
@@ -0,0 +1,73 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package client provides bindings for the etcd APIs.
+
+Create a Config and exchange it for a Client:
+
+ import (
+ "net/http"
+ "context"
+
+ "go.etcd.io/etcd/client"
+ )
+
+ cfg := client.Config{
+ Endpoints: []string{"http://127.0.0.1:2379"},
+ Transport: DefaultTransport,
+ }
+
+ c, err := client.New(cfg)
+ if err != nil {
+ // handle error
+ }
+
+Clients are safe for concurrent use by multiple goroutines.
+
+Create a KeysAPI using the Client, then use it to interact with etcd:
+
+ kAPI := client.NewKeysAPI(c)
+
+ // create a new key /foo with the value "bar"
+ _, err = kAPI.Create(context.Background(), "/foo", "bar")
+ if err != nil {
+ // handle error
+ }
+
+ // delete the newly created key only if the value is still "bar"
+ _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
+ if err != nil {
+ // handle error
+ }
+
+Use a custom context to set timeouts on your operations:
+
+ import "time"
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ // set a new key, ignoring its previous state
+ _, err := kAPI.Set(ctx, "/ping", "pong", nil)
+ if err != nil {
+ if err == context.DeadlineExceeded {
+ // request took longer than 5s
+ } else {
+ // handle error
+ }
+ }
+
+*/
+package client
diff --git a/vendor/go.etcd.io/etcd/client/json.go b/vendor/go.etcd.io/etcd/client/json.go
new file mode 100644
index 000000000000..97cdbcd7cfa5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/json.go
@@ -0,0 +1,72 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "github.com/json-iterator/go"
+ "github.com/modern-go/reflect2"
+ "strconv"
+ "unsafe"
+)
+
+type customNumberExtension struct {
+ jsoniter.DummyExtension
+}
+
+func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder {
+ if typ.String() == "interface {}" {
+ return customNumberDecoder{}
+ }
+ return nil
+}
+
+type customNumberDecoder struct {
+}
+
+func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ switch iter.WhatIsNext() {
+ case jsoniter.NumberValue:
+ var number jsoniter.Number
+ iter.ReadVal(&number)
+ i64, err := strconv.ParseInt(string(number), 10, 64)
+ if err == nil {
+ *(*interface{})(ptr) = i64
+ return
+ }
+ f64, err := strconv.ParseFloat(string(number), 64)
+ if err == nil {
+ *(*interface{})(ptr) = f64
+ return
+ }
+ iter.ReportError("DecodeNumber", err.Error())
+ default:
+ *(*interface{})(ptr) = iter.Read()
+ }
+}
+
+// caseSensitiveJsonIterator returns a jsoniter API that's configured to be
+// case-sensitive when unmarshalling, and otherwise compatible with
+// the encoding/json standard library.
+func caseSensitiveJsonIterator() jsoniter.API {
+ config := jsoniter.Config{
+ EscapeHTML: true,
+ SortMapKeys: true,
+ ValidateJsonRawMessage: true,
+ CaseSensitive: true,
+ }.Froze()
+ // Force jsoniter to decode number to interface{} via int64/float64, if possible.
+ config.RegisterExtension(&customNumberExtension{})
+ return config
+}
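+
+// Illustrative effect of the extension above: unmarshalling `{"index":7}`
+// into a map[string]interface{} yields int64(7) rather than the
+// encoding/json default of float64(7); non-integral numbers still decode
+// as float64.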
diff --git a/vendor/go.etcd.io/etcd/client/keys.go b/vendor/go.etcd.io/etcd/client/keys.go
new file mode 100644
index 000000000000..ec53830c7f0a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/keys.go
@@ -0,0 +1,679 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "go.etcd.io/etcd/pkg/pathutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ ErrorCodeKeyNotFound = 100
+ ErrorCodeTestFailed = 101
+ ErrorCodeNotFile = 102
+ ErrorCodeNotDir = 104
+ ErrorCodeNodeExist = 105
+ ErrorCodeRootROnly = 107
+ ErrorCodeDirNotEmpty = 108
+ ErrorCodeUnauthorized = 110
+
+ ErrorCodePrevValueRequired = 201
+ ErrorCodeTTLNaN = 202
+ ErrorCodeIndexNaN = 203
+ ErrorCodeInvalidField = 209
+ ErrorCodeInvalidForm = 210
+
+ ErrorCodeRaftInternal = 300
+ ErrorCodeLeaderElect = 301
+
+ ErrorCodeWatcherCleared = 400
+ ErrorCodeEventIndexCleared = 401
+)
+
+type Error struct {
+ Code int `json:"errorCode"`
+ Message string `json:"message"`
+ Cause string `json:"cause"`
+ Index uint64 `json:"index"`
+}
+
+func (e Error) Error() string {
+ return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index)
+}
+
+var (
+	ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not a valid etcd cluster endpoint")
+ ErrEmptyBody = errors.New("client: response body is empty")
+)
+
+// PrevExistType is used to define an existence condition when setting
+// or deleting Nodes.
+type PrevExistType string
+
+const (
+ PrevIgnore = PrevExistType("")
+ PrevExist = PrevExistType("true")
+ PrevNoExist = PrevExistType("false")
+)
+
+var (
+ defaultV2KeysPrefix = "/v2/keys"
+)
+
+// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
+// API over HTTP.
+func NewKeysAPI(c Client) KeysAPI {
+ return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix)
+}
+
+// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller
+// to provide a custom base URL path. This should only be used in
+// very rare cases.
+func NewKeysAPIWithPrefix(c Client, p string) KeysAPI {
+ return &httpKeysAPI{
+ client: c,
+ prefix: p,
+ }
+}
+
+type KeysAPI interface {
+	// Get retrieves a set of Nodes from etcd.
+ Get(ctx context.Context, key string, opts *GetOptions) (*Response, error)
+
+ // Set assigns a new value to a Node identified by a given key. The caller
+ // may define a set of conditions in the SetOptions. If SetOptions.Dir=true
+ // then value is ignored.
+ Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
+
+ // Delete removes a Node identified by the given key, optionally destroying
+ // all of its children as well. The caller may define a set of required
+	// conditions in a DeleteOptions object.
+ Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error)
+
+ // Create is an alias for Set w/ PrevExist=false
+ Create(ctx context.Context, key, value string) (*Response, error)
+
+ // CreateInOrder is used to atomically create in-order keys within the given directory.
+ CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error)
+
+ // Update is an alias for Set w/ PrevExist=true
+ Update(ctx context.Context, key, value string) (*Response, error)
+
+ // Watcher builds a new Watcher targeted at a specific Node identified
+ // by the given key. The Watcher may be configured at creation time
+ // through a WatcherOptions object. The returned Watcher is designed
+ // to emit events that happen to a Node, and optionally to its children.
+ Watcher(key string, opts *WatcherOptions) Watcher
+}
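+
+// A compare-and-swap sketch using SetOptions (key and values are
+// illustrative):
+//
+//	// succeeds only if /foo currently holds "bar"; otherwise the server
+//	// rejects the request with ErrorCodeTestFailed
+//	_, err := kAPI.Set(ctx, "/foo", "baz", &SetOptions{PrevValue: "bar"})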
+
+type WatcherOptions struct {
+ // AfterIndex defines the index after-which the Watcher should
+ // start emitting events. For example, if a value of 5 is
+ // provided, the first event will have an index >= 6.
+ //
+ // Setting AfterIndex to 0 (default) means that the Watcher
+ // should start watching for events starting at the current
+ // index, whatever that may be.
+ AfterIndex uint64
+
+ // Recursive specifies whether or not the Watcher should emit
+ // events that occur in children of the given keyspace. If set
+ // to false (default), events will be limited to those that
+ // occur for the exact key.
+ Recursive bool
+}
+
+type CreateInOrderOptions struct {
+	// TTL defines a period of time after which the Node should
+ // expire and no longer exist. Values <= 0 are ignored. Given
+ // that the zero-value is ignored, TTL cannot be used to set
+ // a TTL of 0.
+ TTL time.Duration
+}
+
+type SetOptions struct {
+ // PrevValue specifies what the current value of the Node must
+ // be in order for the Set operation to succeed.
+ //
+ // Leaving this field empty means that the caller wishes to
+ // ignore the current value of the Node. This cannot be used
+ // to compare the Node's current value to an empty string.
+ //
+ // PrevValue is ignored if Dir=true
+ PrevValue string
+
+ // PrevIndex indicates what the current ModifiedIndex of the
+ // Node must be in order for the Set operation to succeed.
+ //
+ // If PrevIndex is set to 0 (default), no comparison is made.
+ PrevIndex uint64
+
+ // PrevExist specifies whether the Node must currently exist
+ // (PrevExist) or not (PrevNoExist). If the caller does not
+ // care about existence, set PrevExist to PrevIgnore, or simply
+ // leave it unset.
+ PrevExist PrevExistType
+
+	// TTL defines a period of time after which the Node should
+ // expire and no longer exist. Values <= 0 are ignored. Given
+ // that the zero-value is ignored, TTL cannot be used to set
+ // a TTL of 0.
+ TTL time.Duration
+
+ // Refresh set to true means a TTL value can be updated
+ // without firing a watch or changing the node value. A
+ // value must not be provided when refreshing a key.
+ Refresh bool
+
+ // Dir specifies whether or not this Node should be created as a directory.
+ Dir bool
+
+ // NoValueOnSuccess specifies whether the response contains the current value of the Node.
+ // If set, the response will only contain the current value when the request fails.
+ NoValueOnSuccess bool
+}
+
+type GetOptions struct {
+ // Recursive defines whether or not all children of the Node
+ // should be returned.
+ Recursive bool
+
+ // Sort instructs the server whether or not to sort the Nodes.
+ // If true, the Nodes are sorted alphabetically by key in
+	// ascending order (A to Z). If false (default), the Nodes will
+ // not be sorted and the ordering used should not be considered
+ // predictable.
+ Sort bool
+
+	// Quorum specifies whether to get the latest committed value that
+	// has been applied in a quorum of members, which ensures external
+	// consistency (or linearizability).
+ Quorum bool
+}
+
+type DeleteOptions struct {
+ // PrevValue specifies what the current value of the Node must
+ // be in order for the Delete operation to succeed.
+ //
+ // Leaving this field empty means that the caller wishes to
+ // ignore the current value of the Node. This cannot be used
+ // to compare the Node's current value to an empty string.
+ PrevValue string
+
+ // PrevIndex indicates what the current ModifiedIndex of the
+ // Node must be in order for the Delete operation to succeed.
+ //
+ // If PrevIndex is set to 0 (default), no comparison is made.
+ PrevIndex uint64
+
+ // Recursive defines whether or not all children of the Node
+ // should be deleted. If set to true, all children of the Node
+ // identified by the given key will be deleted. If left unset
+ // or explicitly set to false, only a single Node will be
+ // deleted.
+ Recursive bool
+
+ // Dir specifies whether or not this Node should be removed as a directory.
+ Dir bool
+}
+
+type Watcher interface {
+ // Next blocks until an etcd event occurs, then returns a Response
+ // representing that event. The behavior of Next depends on the
+ // WatcherOptions used to construct the Watcher. Next is designed to
+ // be called repeatedly, each time blocking until a subsequent event
+ // is available.
+ //
+ // If the provided context is cancelled, Next will return a non-nil
+ // error. Any other failures encountered while waiting for the next
+ // event (connection issues, deserialization failures, etc) will
+ // also result in a non-nil error.
+ Next(context.Context) (*Response, error)
+}
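+
+// A typical watch loop, sketched (kAPI is a KeysAPI; the key name is
+// illustrative):
+//
+//	w := kAPI.Watcher("/foo", &WatcherOptions{Recursive: true})
+//	for {
+//		resp, err := w.Next(context.Background())
+//		if err != nil {
+//			// handle error (e.g. cancellation or cluster unavailability)
+//			break
+//		}
+//		fmt.Println(resp.Action, resp.Node.Key)
+//	}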
+
+type Response struct {
+ // Action is the name of the operation that occurred. Possible values
+ // include get, set, delete, update, create, compareAndSwap,
+ // compareAndDelete and expire.
+ Action string `json:"action"`
+
+ // Node represents the state of the relevant etcd Node.
+ Node *Node `json:"node"`
+
+ // PrevNode represents the previous state of the Node. PrevNode is non-nil
+ // only if the Node existed before the action occurred and the action
+ // caused a change to the Node.
+ PrevNode *Node `json:"prevNode"`
+
+ // Index holds the cluster-level index at the time the Response was generated.
+ // This index is not tied to the Node(s) contained in this Response.
+ Index uint64 `json:"-"`
+
+ // ClusterID holds the cluster-level ID reported by the server. This
+ // should be different for different etcd clusters.
+ ClusterID string `json:"-"`
+}
+
+type Node struct {
+ // Key represents the unique location of this Node (e.g. "/foo/bar").
+ Key string `json:"key"`
+
+ // Dir reports whether node describes a directory.
+ Dir bool `json:"dir,omitempty"`
+
+ // Value is the current data stored on this Node. If this Node
+ // is a directory, Value will be empty.
+ Value string `json:"value"`
+
+ // Nodes holds the children of this Node, only if this Node is a directory.
+	// This slice will be arbitrarily deep (children, grandchildren, great-
+	// grandchildren, etc.) if a recursive Get or Watch request was made.
+ Nodes Nodes `json:"nodes"`
+
+	// CreatedIndex is the etcd index at which this Node was created.
+ CreatedIndex uint64 `json:"createdIndex"`
+
+	// ModifiedIndex is the etcd index at which this Node was last modified.
+ ModifiedIndex uint64 `json:"modifiedIndex"`
+
+ // Expiration is the server side expiration time of the key.
+ Expiration *time.Time `json:"expiration,omitempty"`
+
+	// TTL is the time to live of the key in seconds.
+ TTL int64 `json:"ttl,omitempty"`
+}
+
+func (n *Node) String() string {
+ return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL)
+}
+
+// TTLDuration returns the Node's TTL as a time.Duration object
+func (n *Node) TTLDuration() time.Duration {
+ return time.Duration(n.TTL) * time.Second
+}
+
+type Nodes []*Node
+
+// interfaces for sorting
+
+func (ns Nodes) Len() int { return len(ns) }
+func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
+func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
+
+type httpKeysAPI struct {
+ client httpClient
+ prefix string
+}
+
+func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) {
+ act := &setAction{
+ Prefix: k.prefix,
+ Key: key,
+ Value: val,
+ }
+
+ if opts != nil {
+ act.PrevValue = opts.PrevValue
+ act.PrevIndex = opts.PrevIndex
+ act.PrevExist = opts.PrevExist
+ act.TTL = opts.TTL
+ act.Refresh = opts.Refresh
+ act.Dir = opts.Dir
+ act.NoValueOnSuccess = opts.NoValueOnSuccess
+ }
+
+ doCtx := ctx
+ if act.PrevExist == PrevNoExist {
+ doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
+ }
+ resp, body, err := k.client.Do(doCtx, act)
+ if err != nil {
+ return nil, err
+ }
+
+ return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
+}
+
+func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) {
+ return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist})
+}
+
+func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) {
+ act := &createInOrderAction{
+ Prefix: k.prefix,
+ Dir: dir,
+ Value: val,
+ }
+
+ if opts != nil {
+ act.TTL = opts.TTL
+ }
+
+ resp, body, err := k.client.Do(ctx, act)
+ if err != nil {
+ return nil, err
+ }
+
+ return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
+}
+
+func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) {
+ return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist})
+}
+
+func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) {
+ act := &deleteAction{
+ Prefix: k.prefix,
+ Key: key,
+ }
+
+ if opts != nil {
+ act.PrevValue = opts.PrevValue
+ act.PrevIndex = opts.PrevIndex
+ act.Dir = opts.Dir
+ act.Recursive = opts.Recursive
+ }
+
+ doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
+ resp, body, err := k.client.Do(doCtx, act)
+ if err != nil {
+ return nil, err
+ }
+
+ return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
+}
+
+func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) {
+ act := &getAction{
+ Prefix: k.prefix,
+ Key: key,
+ }
+
+ if opts != nil {
+ act.Recursive = opts.Recursive
+ act.Sorted = opts.Sort
+ act.Quorum = opts.Quorum
+ }
+
+ resp, body, err := k.client.Do(ctx, act)
+ if err != nil {
+ return nil, err
+ }
+
+ return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
+}
+
+func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher {
+ act := waitAction{
+ Prefix: k.prefix,
+ Key: key,
+ }
+
+ if opts != nil {
+ act.Recursive = opts.Recursive
+ if opts.AfterIndex > 0 {
+ act.WaitIndex = opts.AfterIndex + 1
+ }
+ }
+
+ return &httpWatcher{
+ client: k.client,
+ nextWait: act,
+ }
+}
+
+type httpWatcher struct {
+ client httpClient
+ nextWait waitAction
+}
+
+func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
+ for {
+ httpresp, body, err := hw.client.Do(ctx, &hw.nextWait)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body)
+ if err != nil {
+ if err == ErrEmptyBody {
+ continue
+ }
+ return nil, err
+ }
+
+ hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1
+ return resp, nil
+ }
+}
+
+// v2KeysURL forms a URL representing the location of a key.
+// The endpoint argument represents the base URL of an etcd
+// server. The prefix is the path needed to route from the
+// provided endpoint's path to the root of the keys API
+// (typically "/v2/keys").
+func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
+ // We concatenate all parts together manually. We cannot use
+	// path.Join because it does not preserve the trailing slash.
+	// We call CanonicalURLPath to further clean up the path.
+ if prefix != "" && prefix[0] != '/' {
+ prefix = "/" + prefix
+ }
+ if key != "" && key[0] != '/' {
+ key = "/" + key
+ }
+ ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key)
+ return &ep
+}
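+
+// For example (illustrative values):
+//
+//	ep := url.URL{Scheme: "http", Host: "127.0.0.1:2379"}
+//	u := v2KeysURL(ep, "/v2/keys", "/foo/bar")
+//	// u.String() == "http://127.0.0.1:2379/v2/keys/foo/bar"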
+
+type getAction struct {
+ Prefix string
+ Key string
+ Recursive bool
+ Sorted bool
+ Quorum bool
+}
+
+func (g *getAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2KeysURL(ep, g.Prefix, g.Key)
+
+ params := u.Query()
+ params.Set("recursive", strconv.FormatBool(g.Recursive))
+ params.Set("sorted", strconv.FormatBool(g.Sorted))
+ params.Set("quorum", strconv.FormatBool(g.Quorum))
+ u.RawQuery = params.Encode()
+
+ req, _ := http.NewRequest("GET", u.String(), nil)
+ return req
+}
+
+type waitAction struct {
+ Prefix string
+ Key string
+ WaitIndex uint64
+ Recursive bool
+}
+
+func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2KeysURL(ep, w.Prefix, w.Key)
+
+ params := u.Query()
+ params.Set("wait", "true")
+ params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10))
+ params.Set("recursive", strconv.FormatBool(w.Recursive))
+ u.RawQuery = params.Encode()
+
+ req, _ := http.NewRequest("GET", u.String(), nil)
+ return req
+}
+
+type setAction struct {
+ Prefix string
+ Key string
+ Value string
+ PrevValue string
+ PrevIndex uint64
+ PrevExist PrevExistType
+ TTL time.Duration
+ Refresh bool
+ Dir bool
+ NoValueOnSuccess bool
+}
+
+func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2KeysURL(ep, a.Prefix, a.Key)
+
+ params := u.Query()
+ form := url.Values{}
+
+ // we're either creating a directory or setting a key
+ if a.Dir {
+ params.Set("dir", strconv.FormatBool(a.Dir))
+ } else {
+ // These options are only valid for setting a key
+ if a.PrevValue != "" {
+ params.Set("prevValue", a.PrevValue)
+ }
+ form.Add("value", a.Value)
+ }
+
+ // Options which apply to both setting a key and creating a dir
+ if a.PrevIndex != 0 {
+ params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
+ }
+ if a.PrevExist != PrevIgnore {
+ params.Set("prevExist", string(a.PrevExist))
+ }
+ if a.TTL > 0 {
+ form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
+ }
+
+ if a.Refresh {
+ form.Add("refresh", "true")
+ }
+ if a.NoValueOnSuccess {
+ params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
+ }
+
+ u.RawQuery = params.Encode()
+ body := strings.NewReader(form.Encode())
+
+ req, _ := http.NewRequest("PUT", u.String(), body)
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ return req
+}
+
+type deleteAction struct {
+ Prefix string
+ Key string
+ PrevValue string
+ PrevIndex uint64
+ Dir bool
+ Recursive bool
+}
+
+func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2KeysURL(ep, a.Prefix, a.Key)
+
+ params := u.Query()
+ if a.PrevValue != "" {
+ params.Set("prevValue", a.PrevValue)
+ }
+ if a.PrevIndex != 0 {
+ params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
+ }
+ if a.Dir {
+ params.Set("dir", "true")
+ }
+ if a.Recursive {
+ params.Set("recursive", "true")
+ }
+ u.RawQuery = params.Encode()
+
+ req, _ := http.NewRequest("DELETE", u.String(), nil)
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ return req
+}
+
+type createInOrderAction struct {
+ Prefix string
+ Dir string
+ Value string
+ TTL time.Duration
+}
+
+func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2KeysURL(ep, a.Prefix, a.Dir)
+
+ form := url.Values{}
+ form.Add("value", a.Value)
+ if a.TTL > 0 {
+ form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
+ }
+ body := strings.NewReader(form.Encode())
+
+ req, _ := http.NewRequest("POST", u.String(), body)
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ return req
+}
+
+func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) {
+ switch code {
+ case http.StatusOK, http.StatusCreated:
+ if len(body) == 0 {
+ return nil, ErrEmptyBody
+ }
+ res, err = unmarshalSuccessfulKeysResponse(header, body)
+ default:
+ err = unmarshalFailedKeysResponse(body)
+ }
+ return res, err
+}
+
+var jsonIterator = caseSensitiveJsonIterator()
+
+func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
+ var res Response
+ err := jsonIterator.Unmarshal(body, &res)
+ if err != nil {
+ return nil, ErrInvalidJSON
+ }
+ if header.Get("X-Etcd-Index") != "" {
+ res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+ res.ClusterID = header.Get("X-Etcd-Cluster-ID")
+ return &res, nil
+}
+
+func unmarshalFailedKeysResponse(body []byte) error {
+ var etcdErr Error
+ if err := json.Unmarshal(body, &etcdErr); err != nil {
+ return ErrInvalidJSON
+ }
+ return etcdErr
+}
diff --git a/vendor/go.etcd.io/etcd/client/members.go b/vendor/go.etcd.io/etcd/client/members.go
new file mode 100644
index 000000000000..657131ab0ce0
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/members.go
@@ -0,0 +1,303 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+
+ "go.etcd.io/etcd/pkg/types"
+)
+
+var (
+ defaultV2MembersPrefix = "/v2/members"
+ defaultLeaderSuffix = "/leader"
+)
+
+type Member struct {
+ // ID is the unique identifier of this Member.
+ ID string `json:"id"`
+
+ // Name is a human-readable, non-unique identifier of this Member.
+ Name string `json:"name"`
+
+ // PeerURLs represents the HTTP(S) endpoints this Member uses to
+ // participate in etcd's consensus protocol.
+ PeerURLs []string `json:"peerURLs"`
+
+ // ClientURLs represents the HTTP(S) endpoints on which this Member
+ // serves its client-facing APIs.
+ ClientURLs []string `json:"clientURLs"`
+}
+
+type memberCollection []Member
+
+func (c *memberCollection) UnmarshalJSON(data []byte) error {
+ d := struct {
+ Members []Member
+ }{}
+
+ if err := json.Unmarshal(data, &d); err != nil {
+ return err
+ }
+
+ if d.Members == nil {
+ *c = make([]Member, 0)
+ return nil
+ }
+
+ *c = d.Members
+ return nil
+}
+
+type memberCreateOrUpdateRequest struct {
+ PeerURLs types.URLs
+}
+
+func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) {
+ s := struct {
+ PeerURLs []string `json:"peerURLs"`
+ }{
+ PeerURLs: make([]string, len(m.PeerURLs)),
+ }
+
+ for i, u := range m.PeerURLs {
+ s.PeerURLs[i] = u.String()
+ }
+
+ return json.Marshal(&s)
+}
+
+// NewMembersAPI constructs a new MembersAPI that uses HTTP to
+// interact with etcd's membership API.
+func NewMembersAPI(c Client) MembersAPI {
+ return &httpMembersAPI{
+ client: c,
+ }
+}
+
+type MembersAPI interface {
+ // List enumerates the current cluster membership.
+ List(ctx context.Context) ([]Member, error)
+
+ // Add instructs etcd to accept a new Member into the cluster.
+ Add(ctx context.Context, peerURL string) (*Member, error)
+
+	// Remove removes an existing Member from the cluster.
+ Remove(ctx context.Context, mID string) error
+
+ // Update instructs etcd to update an existing Member in the cluster.
+ Update(ctx context.Context, mID string, peerURLs []string) error
+
+	// Leader returns the current leader of the cluster.
+ Leader(ctx context.Context) (*Member, error)
+}
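+
+// A listing sketch (c is an existing Client; error handling elided):
+//
+//	mAPI := NewMembersAPI(c)
+//	members, _ := mAPI.List(context.Background())
+//	for _, m := range members {
+//		fmt.Println(m.ID, m.Name, m.PeerURLs)
+//	}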
+
+type httpMembersAPI struct {
+ client httpClient
+}
+
+func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
+ req := &membersAPIActionList{}
+ resp, body, err := m.client.Do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+ return nil, err
+ }
+
+ var mCollection memberCollection
+ if err := json.Unmarshal(body, &mCollection); err != nil {
+ return nil, err
+ }
+
+ return []Member(mCollection), nil
+}
+
+func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
+ urls, err := types.NewURLs([]string{peerURL})
+ if err != nil {
+ return nil, err
+ }
+
+ req := &membersAPIActionAdd{peerURLs: urls}
+ resp, body, err := m.client.Do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusCreated {
+ var merr membersError
+ if err := json.Unmarshal(body, &merr); err != nil {
+ return nil, err
+ }
+ return nil, merr
+ }
+
+ var memb Member
+ if err := json.Unmarshal(body, &memb); err != nil {
+ return nil, err
+ }
+
+ return &memb, nil
+}
+
+func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error {
+ urls, err := types.NewURLs(peerURLs)
+ if err != nil {
+ return err
+ }
+
+ req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID}
+ resp, body, err := m.client.Do(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil {
+ return err
+ }
+
+ if resp.StatusCode != http.StatusNoContent {
+ var merr membersError
+ if err := json.Unmarshal(body, &merr); err != nil {
+ return err
+ }
+ return merr
+ }
+
+ return nil
+}
+
+func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
+ req := &membersAPIActionRemove{memberID: memberID}
+ resp, _, err := m.client.Do(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
+}
+
+func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
+ req := &membersAPIActionLeader{}
+ resp, body, err := m.client.Do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+ return nil, err
+ }
+
+ var leader Member
+ if err := json.Unmarshal(body, &leader); err != nil {
+ return nil, err
+ }
+
+ return &leader, nil
+}
+
+type membersAPIActionList struct{}
+
+func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
+ u := v2MembersURL(ep)
+ req, _ := http.NewRequest("GET", u.String(), nil)
+ return req
+}
+
+type membersAPIActionRemove struct {
+ memberID string
+}
+
+func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
+ u := v2MembersURL(ep)
+ u.Path = path.Join(u.Path, d.memberID)
+ req, _ := http.NewRequest("DELETE", u.String(), nil)
+ return req
+}
+
+type membersAPIActionAdd struct {
+ peerURLs types.URLs
+}
+
+func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
+ u := v2MembersURL(ep)
+ m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
+ b, _ := json.Marshal(&m)
+ req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
+ req.Header.Set("Content-Type", "application/json")
+ return req
+}
+
+type membersAPIActionUpdate struct {
+ memberID string
+ peerURLs types.URLs
+}
+
+func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
+ u := v2MembersURL(ep)
+ m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
+ u.Path = path.Join(u.Path, a.memberID)
+ b, _ := json.Marshal(&m)
+ req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
+ req.Header.Set("Content-Type", "application/json")
+ return req
+}
+
+func assertStatusCode(got int, want ...int) (err error) {
+ for _, w := range want {
+ if w == got {
+ return nil
+ }
+ }
+ return fmt.Errorf("unexpected status code %d", got)
+}
+
+type membersAPIActionLeader struct{}
+
+func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
+ u := v2MembersURL(ep)
+ u.Path = path.Join(u.Path, defaultLeaderSuffix)
+ req, _ := http.NewRequest("GET", u.String(), nil)
+ return req
+}
+
+// v2MembersURL adds the necessary path to the provided endpoint
+// to route requests to the default v2 members API.
+func v2MembersURL(ep url.URL) *url.URL {
+ ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
+ return &ep
+}
+
+type membersError struct {
+ Message string `json:"message"`
+ Code int `json:"-"`
+}
+
+func (e membersError) Error() string {
+ return e.Message
+}
diff --git a/vendor/go.etcd.io/etcd/client/util.go b/vendor/go.etcd.io/etcd/client/util.go
new file mode 100644
index 000000000000..15a8babff4d4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/util.go
@@ -0,0 +1,53 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "regexp"
+)
+
+var (
+ roleNotFoundRegExp *regexp.Regexp
+ userNotFoundRegExp *regexp.Regexp
+)
+
+func init() {
+ roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
+ userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
+}
+
+// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
+func IsKeyNotFound(err error) bool {
+ if cErr, ok := err.(Error); ok {
+ return cErr.Code == ErrorCodeKeyNotFound
+ }
+ return false
+}
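+
+// For example (sketch; kAPI is a KeysAPI):
+//
+//	if _, err := kAPI.Get(ctx, "/foo", nil); IsKeyNotFound(err) {
+//		// key does not exist yet; create it
+//	}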
+
+// IsRoleNotFound returns true if the error indicates a role was not found in the v2 API.
+func IsRoleNotFound(err error) bool {
+ if ae, ok := err.(authError); ok {
+ return roleNotFoundRegExp.MatchString(ae.Message)
+ }
+ return false
+}
+
+// IsUserNotFound returns true if the error indicates a user was not found in the v2 API.
+func IsUserNotFound(err error) bool {
+ if ae, ok := err.(authError); ok {
+ return userNotFoundRegExp.MatchString(ae.Message)
+ }
+ return false
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go
index 9e043789c8df..f4b941d6529e 100644
--- a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/err.go
@@ -34,6 +34,6 @@ func (ep *errPicker) String() string {
return ep.p.String()
}
-func (ep *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (ep *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
return nil, nil, ep.err
}
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go
index 1b8b28573782..e3971ecc4210 100644
--- a/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/picker/roundrobin_balanced.go
@@ -52,7 +52,7 @@ type rrBalanced struct {
func (rb *rrBalanced) String() string { return rb.p.String() }
// Pick is called for every client request.
-func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
rb.mu.RLock()
n := len(rb.scs)
rb.mu.RUnlock()
diff --git a/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go b/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
index 864b5df6426f..2837bd4180bd 100644
--- a/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
+++ b/vendor/go.etcd.io/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
@@ -111,7 +111,7 @@ func (e *ResolverGroup) Close() {
}
// Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target.
-func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
if len(target.Authority) < 1 {
return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to")
}
@@ -179,7 +179,7 @@ func epsToAddrs(eps ...string) (addrs []resolver.Address) {
return addrs
}
-func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {}
+func (*Resolver) ResolveNow(o resolver.ResolveNowOptions) {}
func (r *Resolver) Close() {
es, err := bldr.getResolverGroup(r.endpointID)
diff --git a/vendor/go.etcd.io/etcd/clientv3/client.go b/vendor/go.etcd.io/etcd/clientv3/client.go
index 215e05479809..a35ec679a029 100644
--- a/vendor/go.etcd.io/etcd/clientv3/client.go
+++ b/vendor/go.etcd.io/etcd/clientv3/client.go
@@ -37,7 +37,6 @@ import (
"google.golang.org/grpc/codes"
grpccredentials "google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
@@ -397,13 +396,6 @@ func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCrede
return creds
}
-// WithRequireLeader requires client requests to only succeed
-// when the cluster has a leader.
-func WithRequireLeader(ctx context.Context) context.Context {
- md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
- return metadata.NewOutgoingContext(ctx, md)
-}
-
func newClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = &Config{}
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go
new file mode 100644
index 000000000000..dcdbf511d1b1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package concurrency implements concurrency operations on top of
+// etcd such as distributed locks, barriers, and elections.
+package concurrency
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go
new file mode 100644
index 000000000000..2521db6ac045
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go
@@ -0,0 +1,254 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ v3 "go.etcd.io/etcd/clientv3"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/mvcc/mvccpb"
+)
+
+var (
+ ErrElectionNotLeader = errors.New("election: not leader")
+ ErrElectionNoLeader = errors.New("election: no leader")
+)
+
+type Election struct {
+ session *Session
+
+ keyPrefix string
+
+ leaderKey string
+ leaderRev int64
+ leaderSession *Session
+ hdr *pb.ResponseHeader
+}
+
+// NewElection returns a new election on a given key prefix.
+func NewElection(s *Session, pfx string) *Election {
+ return &Election{session: s, keyPrefix: pfx + "/"}
+}
+
+// ResumeElection initializes an election with a known leader.
+func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
+ return &Election{
+ keyPrefix: pfx,
+ session: s,
+ leaderKey: leaderKey,
+ leaderRev: leaderRev,
+ leaderSession: s,
+ }
+}
+
+// Campaign puts a value as eligible for the election on the prefix
+// key. Multiple sessions can participate in the election for the
+// same prefix, but only one can be the leader at a time.
+//
+// If the context is 'context.TODO()/context.Background()', Campaign
+// blocks waiting for earlier keys under the prefix to be deleted,
+// unless the server returns a non-recoverable error (e.g. ErrCompacted).
+// Otherwise, Campaign blocks until it becomes the leader or the
+// context is cancelled or times out.
+func (e *Election) Campaign(ctx context.Context, val string) error {
+ s := e.session
+ client := e.session.Client()
+
+ k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
+ txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
+ txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
+ txn = txn.Else(v3.OpGet(k))
+ resp, err := txn.Commit()
+ if err != nil {
+ return err
+ }
+ e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
+ if !resp.Succeeded {
+ kv := resp.Responses[0].GetResponseRange().Kvs[0]
+ e.leaderRev = kv.CreateRevision
+ if string(kv.Value) != val {
+ if err = e.Proclaim(ctx, val); err != nil {
+ e.Resign(ctx)
+ return err
+ }
+ }
+ }
+
+ _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
+ if err != nil {
+ // clean up in case of context cancel
+ select {
+ case <-ctx.Done():
+ e.Resign(client.Ctx())
+ default:
+ e.leaderSession = nil
+ }
+ return err
+ }
+ e.hdr = resp.Header
+
+ return nil
+}
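+
+// A campaign sketch (cli is an existing clientv3 client; names are
+// illustrative):
+//
+//	s, _ := concurrency.NewSession(cli)
+//	e := concurrency.NewElection(s, "/my-election")
+//	if err := e.Campaign(context.Background(), "candidate-1"); err != nil {
+//		// handle error
+//	}
+//	// ... act as leader ...
+//	_ = e.Resign(context.Background())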
+
+// Proclaim lets the leader announce a new value without another election.
+func (e *Election) Proclaim(ctx context.Context, val string) error {
+ if e.leaderSession == nil {
+ return ErrElectionNotLeader
+ }
+ client := e.session.Client()
+ cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+ txn := client.Txn(ctx).If(cmp)
+ txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
+ tresp, terr := txn.Commit()
+ if terr != nil {
+ return terr
+ }
+ if !tresp.Succeeded {
+ e.leaderKey = ""
+ return ErrElectionNotLeader
+ }
+
+ e.hdr = tresp.Header
+ return nil
+}
+
+// Resign releases leadership so a new election can begin.
+func (e *Election) Resign(ctx context.Context) (err error) {
+ if e.leaderSession == nil {
+ return nil
+ }
+ client := e.session.Client()
+ cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+ resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
+ if err == nil {
+ e.hdr = resp.Header
+ }
+ e.leaderKey = ""
+ e.leaderSession = nil
+ return err
+}
+
+// Leader returns the leader value for the current election.
+func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
+ client := e.session.Client()
+ resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+ if err != nil {
+ return nil, err
+ } else if len(resp.Kvs) == 0 {
+ // no leader currently elected
+ return nil, ErrElectionNoLeader
+ }
+ return resp, nil
+}
+
+// Observe returns a channel that reliably observes ordered leader proposals
+// as GetResponse values for each currently elected leader key. It will not
+// necessarily fetch all historical leader updates, but will always post the
+// most recent leader value.
+//
+// The channel closes when the context is canceled or the underlying watcher
+// is otherwise disrupted.
+func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
+ retc := make(chan v3.GetResponse)
+ go e.observe(ctx, retc)
+ return retc
+}
+
+func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
+ client := e.session.Client()
+
+ defer close(ch)
+ for {
+ resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+ if err != nil {
+ return
+ }
+
+ var kv *mvccpb.KeyValue
+ var hdr *pb.ResponseHeader
+
+ if len(resp.Kvs) == 0 {
+ cctx, cancel := context.WithCancel(ctx)
+ // wait for first key put on prefix
+ opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
+ wch := client.Watch(cctx, e.keyPrefix, opts...)
+ for kv == nil {
+ wr, ok := <-wch
+ if !ok || wr.Err() != nil {
+ cancel()
+ return
+ }
+ // only accept puts; a delete will make observe() spin
+ for _, ev := range wr.Events {
+ if ev.Type == mvccpb.PUT {
+ hdr, kv = &wr.Header, ev.Kv
+					// the response header may cover multiple revs; set it to
+					// this kv's ModRevision in case the batch has multiple puts
+ hdr.Revision = kv.ModRevision
+ break
+ }
+ }
+ }
+ cancel()
+ } else {
+ hdr, kv = resp.Header, resp.Kvs[0]
+ }
+
+ select {
+ case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
+ case <-ctx.Done():
+ return
+ }
+
+ cctx, cancel := context.WithCancel(ctx)
+ wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
+ keyDeleted := false
+ for !keyDeleted {
+ wr, ok := <-wch
+ if !ok {
+ cancel()
+ return
+ }
+ for _, ev := range wr.Events {
+ if ev.Type == mvccpb.DELETE {
+ keyDeleted = true
+ break
+ }
+ resp.Header = &wr.Header
+ resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
+ select {
+ case ch <- *resp:
+ case <-cctx.Done():
+ cancel()
+ return
+ }
+ }
+ }
+ cancel()
+ }
+}
+
+// Key returns the leader key if elected, empty string otherwise.
+func (e *Election) Key() string { return e.leaderKey }
+
+// Rev returns the leader key's creation revision, if elected.
+func (e *Election) Rev() int64 { return e.leaderRev }
+
+// Header is the response header from the last successful election proposal.
+func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
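+
+// An observation sketch (illustrative): given an *Election e, a follower can
+// track leadership changes; each response carries the current leader's value.
+//
+//	for resp := range e.Observe(context.TODO()) {
+//		fmt.Printf("leader value: %s\n", resp.Kvs[0].Value)
+//	}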
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go
new file mode 100644
index 000000000000..e4cf77517401
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go
@@ -0,0 +1,65 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "fmt"
+
+ v3 "go.etcd.io/etcd/clientv3"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/mvcc/mvccpb"
+)
+
+func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
+ cctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ var wr v3.WatchResponse
+ wch := client.Watch(cctx, key, v3.WithRev(rev))
+ for wr = range wch {
+ for _, ev := range wr.Events {
+ if ev.Type == mvccpb.DELETE {
+ return nil
+ }
+ }
+ }
+ if err := wr.Err(); err != nil {
+ return err
+ }
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ return fmt.Errorf("lost watcher waiting for delete")
+}
+
+// waitDeletes efficiently waits until all keys matching the prefix and no
+// greater than the given create revision are deleted.
+func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
+ getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
+ for {
+ resp, err := client.Get(ctx, pfx, getOpts...)
+ if err != nil {
+ return nil, err
+ }
+ if len(resp.Kvs) == 0 {
+ return resp.Header, nil
+ }
+ lastKey := string(resp.Kvs[0].Key)
+ if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
+ return nil, err
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go
new file mode 100644
index 000000000000..013534193ea5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go
@@ -0,0 +1,117 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ v3 "go.etcd.io/etcd/clientv3"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+)
+
+// Mutex implements a distributed mutual-exclusion lock on etcd; use NewLocker
+// for a sync.Locker adapter.
+type Mutex struct {
+ s *Session
+
+ pfx string
+ myKey string
+ myRev int64
+ hdr *pb.ResponseHeader
+}
+
+func NewMutex(s *Session, pfx string) *Mutex {
+ return &Mutex{s, pfx + "/", "", -1, nil}
+}
+
+// Lock locks the mutex with a cancelable context. If the context is canceled
+// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
+func (m *Mutex) Lock(ctx context.Context) error {
+ s := m.s
+ client := m.s.Client()
+
+ m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
+ cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
+ // put self in lock waiters via myKey; oldest waiter holds lock
+ put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
+ // reuse key in case this session already holds the lock
+ get := v3.OpGet(m.myKey)
+ // fetch current holder to complete uncontended path with only one RPC
+ getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
+ resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
+ if err != nil {
+ return err
+ }
+ m.myRev = resp.Header.Revision
+ if !resp.Succeeded {
+ m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
+ }
+	// if there is no key on the prefix, or our key has the minimum create revision, we already hold the lock
+ ownerKey := resp.Responses[1].GetResponseRange().Kvs
+ if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+ m.hdr = resp.Header
+ return nil
+ }
+
+ // wait for deletion revisions prior to myKey
+ hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
+ // release lock key if wait failed
+ if werr != nil {
+ m.Unlock(client.Ctx())
+ } else {
+ m.hdr = hdr
+ }
+ return werr
+}
+
+func (m *Mutex) Unlock(ctx context.Context) error {
+ client := m.s.Client()
+ if _, err := client.Delete(ctx, m.myKey); err != nil {
+ return err
+ }
+ m.myKey = "\x00"
+ m.myRev = -1
+ return nil
+}
+
+func (m *Mutex) IsOwner() v3.Cmp {
+ return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
+}
+
+func (m *Mutex) Key() string { return m.myKey }
+
+// Header is the response header received from etcd on acquiring the lock.
+func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
+
+type lockerMutex struct{ *Mutex }
+
+func (lm *lockerMutex) Lock() {
+ client := lm.s.Client()
+ if err := lm.Mutex.Lock(client.Ctx()); err != nil {
+ panic(err)
+ }
+}
+func (lm *lockerMutex) Unlock() {
+ client := lm.s.Client()
+ if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
+ panic(err)
+ }
+}
+
+// NewLocker creates a sync.Locker backed by an etcd mutex.
+func NewLocker(s *Session, pfx string) sync.Locker {
+ return &lockerMutex{NewMutex(s, pfx)}
+}
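+
+// A minimal usage sketch (illustrative; the key prefix is an assumption).
+// Given an established *v3.Client cli:
+//
+//	s, _ := NewSession(cli)
+//	m := NewMutex(s, "/my-lock")
+//	if err := m.Lock(context.TODO()); err == nil {
+//		// critical section; guard etcd writes with m.IsOwner() in a txn
+//		_ = m.Unlock(context.TODO())
+//	}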
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go
new file mode 100644
index 000000000000..97eb7631067d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go
@@ -0,0 +1,141 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "time"
+
+ v3 "go.etcd.io/etcd/clientv3"
+)
+
+const defaultSessionTTL = 60
+
+// Session represents a lease kept alive for the lifetime of a client.
+// Fault-tolerant applications may use sessions to reason about liveness.
+type Session struct {
+ client *v3.Client
+ opts *sessionOptions
+ id v3.LeaseID
+
+ cancel context.CancelFunc
+ donec <-chan struct{}
+}
+
+// NewSession gets the leased session for a client.
+func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
+ ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
+ for _, opt := range opts {
+ opt(ops)
+ }
+
+ id := ops.leaseID
+ if id == v3.NoLease {
+ resp, err := client.Grant(ops.ctx, int64(ops.ttl))
+ if err != nil {
+ return nil, err
+ }
+ id = resp.ID
+ }
+
+ ctx, cancel := context.WithCancel(ops.ctx)
+ keepAlive, err := client.KeepAlive(ctx, id)
+ if err != nil || keepAlive == nil {
+ cancel()
+ return nil, err
+ }
+
+ donec := make(chan struct{})
+ s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
+
+ // keep the lease alive until client error or cancelled context
+ go func() {
+ defer close(donec)
+ for range keepAlive {
+ // eat messages until keep alive channel closes
+ }
+ }()
+
+ return s, nil
+}
+
+// Client is the etcd client that is attached to the session.
+func (s *Session) Client() *v3.Client {
+ return s.client
+}
+
+// Lease is the lease ID for keys bound to the session.
+func (s *Session) Lease() v3.LeaseID { return s.id }
+
+// Done returns a channel that closes when the lease is orphaned, expires, or
+// is otherwise no longer being refreshed.
+func (s *Session) Done() <-chan struct{} { return s.donec }
+
+// Orphan ends the refresh for the session lease. This is useful
+// in case the state of the client connection is indeterminate (revoke
+// would fail) or when transferring lease ownership.
+func (s *Session) Orphan() {
+ s.cancel()
+ <-s.donec
+}
+
+// Close orphans the session and revokes the session lease.
+func (s *Session) Close() error {
+ s.Orphan()
+ // if revoke takes longer than the ttl, lease is expired anyway
+ ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
+ _, err := s.client.Revoke(ctx, s.id)
+ cancel()
+ return err
+}
+
+type sessionOptions struct {
+ ttl int
+ leaseID v3.LeaseID
+ ctx context.Context
+}
+
+// SessionOption configures Session.
+type SessionOption func(*sessionOptions)
+
+// WithTTL configures the session's TTL in seconds.
+// If TTL is <= 0, the default 60 seconds TTL will be used.
+func WithTTL(ttl int) SessionOption {
+ return func(so *sessionOptions) {
+ if ttl > 0 {
+ so.ttl = ttl
+ }
+ }
+}
+
+// WithLease specifies the existing leaseID to be used for the session.
+// This is useful in a process-restart scenario, for example, to reclaim
+// leadership from an election held prior to the restart.
+func WithLease(leaseID v3.LeaseID) SessionOption {
+ return func(so *sessionOptions) {
+ so.leaseID = leaseID
+ }
+}
+
+// WithContext assigns a context to the session instead of defaulting to
+// using the client context. This is useful for canceling NewSession and
+// Close operations immediately without having to close the client. If the
+// context is canceled before Close() completes, the session's lease will be
+// abandoned and left to expire instead of being revoked.
+func WithContext(ctx context.Context) SessionOption {
+ return func(so *sessionOptions) {
+ so.ctx = ctx
+ }
+}
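+
+// A minimal usage sketch (illustrative; the TTL value is an assumption).
+// Given an established *v3.Client cli:
+//
+//	s, err := NewSession(cli, WithTTL(15))
+//	if err != nil {
+//		// handle error
+//	}
+//	<-s.Done() // blocks until the lease is orphaned or expires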
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go
new file mode 100644
index 000000000000..ee1151079abd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go
@@ -0,0 +1,387 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "math"
+
+ v3 "go.etcd.io/etcd/clientv3"
+)
+
+// STM is an interface for software transactional memory.
+type STM interface {
+ // Get returns the value for a key and inserts the key in the txn's read set.
+ // If Get fails, it aborts the transaction with an error, never returning.
+ Get(key ...string) string
+ // Put adds a value for a key to the write set.
+ Put(key, val string, opts ...v3.OpOption)
+ // Rev returns the revision of a key in the read set.
+ Rev(key string) int64
+ // Del deletes a key.
+ Del(key string)
+
+ // commit attempts to apply the txn's changes to the server.
+ commit() *v3.TxnResponse
+ reset()
+}
+
+// Isolation is an enumeration of transactional isolation levels which
+// describes how transactions should interfere and conflict.
+type Isolation int
+
+const (
+ // SerializableSnapshot provides serializable isolation and also checks
+ // for write conflicts.
+ SerializableSnapshot Isolation = iota
+ // Serializable reads within the same transaction attempt return data
+	// from the revision of the first read.
+ Serializable
+ // RepeatableReads reads within the same transaction attempt always
+ // return the same data.
+ RepeatableReads
+ // ReadCommitted reads keys from any committed revision.
+ ReadCommitted
+)
+
+// stmError safely passes STM errors through panic to the STM error channel.
+type stmError struct{ err error }
+
+type stmOptions struct {
+ iso Isolation
+ ctx context.Context
+ prefetch []string
+}
+
+type stmOption func(*stmOptions)
+
+// WithIsolation specifies the transaction isolation level.
+func WithIsolation(lvl Isolation) stmOption {
+ return func(so *stmOptions) { so.iso = lvl }
+}
+
+// WithAbortContext specifies the context for permanently aborting the transaction.
+func WithAbortContext(ctx context.Context) stmOption {
+ return func(so *stmOptions) { so.ctx = ctx }
+}
+
+// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
+// If an STM transaction will unconditionally fetch a set of keys, prefetching
+// those keys will save the round-trip cost from requesting each key one by one
+// with Get().
+func WithPrefetch(keys ...string) stmOption {
+ return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
+}
+
+// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
+func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
+ opts := &stmOptions{ctx: c.Ctx()}
+ for _, f := range so {
+ f(opts)
+ }
+ if len(opts.prefetch) != 0 {
+ f := apply
+ apply = func(s STM) error {
+ s.Get(opts.prefetch...)
+ return f(s)
+ }
+ }
+ return runSTM(mkSTM(c, opts), apply)
+}
+
+func mkSTM(c *v3.Client, opts *stmOptions) STM {
+ switch opts.iso {
+ case SerializableSnapshot:
+ s := &stmSerializable{
+ stm: stm{client: c, ctx: opts.ctx},
+ prefetch: make(map[string]*v3.GetResponse),
+ }
+ s.conflicts = func() []v3.Cmp {
+ return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
+ }
+ return s
+ case Serializable:
+ s := &stmSerializable{
+ stm: stm{client: c, ctx: opts.ctx},
+ prefetch: make(map[string]*v3.GetResponse),
+ }
+ s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
+ return s
+ case RepeatableReads:
+ s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
+ s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
+ return s
+ case ReadCommitted:
+ s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
+ s.conflicts = func() []v3.Cmp { return nil }
+ return s
+ default:
+ panic("unsupported stm")
+ }
+}
+
+type stmResponse struct {
+ resp *v3.TxnResponse
+ err error
+}
+
+func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
+ outc := make(chan stmResponse, 1)
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ e, ok := r.(stmError)
+ if !ok {
+ // client apply panicked
+ panic(r)
+ }
+ outc <- stmResponse{nil, e.err}
+ }
+ }()
+ var out stmResponse
+ for {
+ s.reset()
+ if out.err = apply(s); out.err != nil {
+ break
+ }
+ if out.resp = s.commit(); out.resp != nil {
+ break
+ }
+ }
+ outc <- out
+ }()
+ r := <-outc
+ return r.resp, r.err
+}
+
+// stm implements repeatable-read software transactional memory over etcd
+type stm struct {
+ client *v3.Client
+ ctx context.Context
+ // rset holds read key values and revisions
+ rset readSet
+ // wset holds overwritten keys and their values
+ wset writeSet
+ // getOpts are the opts used for gets
+ getOpts []v3.OpOption
+ // conflicts computes the current conflicts on the txn
+ conflicts func() []v3.Cmp
+}
+
+type stmPut struct {
+ val string
+ op v3.Op
+}
+
+type readSet map[string]*v3.GetResponse
+
+func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
+ for i, resp := range txnresp.Responses {
+ rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
+ }
+}
+
+// first returns the store revision from the first fetch
+func (rs readSet) first() int64 {
+ ret := int64(math.MaxInt64 - 1)
+ for _, resp := range rs {
+ if rev := resp.Header.Revision; rev < ret {
+ ret = rev
+ }
+ }
+ return ret
+}
+
+// cmps guards the txn from updates to read set
+func (rs readSet) cmps() []v3.Cmp {
+ cmps := make([]v3.Cmp, 0, len(rs))
+ for k, rk := range rs {
+ cmps = append(cmps, isKeyCurrent(k, rk))
+ }
+ return cmps
+}
+
+type writeSet map[string]stmPut
+
+func (ws writeSet) get(keys ...string) *stmPut {
+ for _, key := range keys {
+ if wv, ok := ws[key]; ok {
+ return &wv
+ }
+ }
+ return nil
+}
+
+// cmps returns a cmp list testing no writes have happened past rev
+func (ws writeSet) cmps(rev int64) []v3.Cmp {
+ cmps := make([]v3.Cmp, 0, len(ws))
+ for key := range ws {
+ cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
+ }
+ return cmps
+}
+
+// puts is the list of ops for all pending writes
+func (ws writeSet) puts() []v3.Op {
+ puts := make([]v3.Op, 0, len(ws))
+ for _, v := range ws {
+ puts = append(puts, v.op)
+ }
+ return puts
+}
+
+func (s *stm) Get(keys ...string) string {
+ if wv := s.wset.get(keys...); wv != nil {
+ return wv.val
+ }
+ return respToValue(s.fetch(keys...))
+}
+
+func (s *stm) Put(key, val string, opts ...v3.OpOption) {
+ s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
+}
+
+func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
+
+func (s *stm) Rev(key string) int64 {
+ if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
+ return resp.Kvs[0].ModRevision
+ }
+ return 0
+}
+
+func (s *stm) commit() *v3.TxnResponse {
+ txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
+ if err != nil {
+ panic(stmError{err})
+ }
+ if txnresp.Succeeded {
+ return txnresp
+ }
+ return nil
+}
+
+func (s *stm) fetch(keys ...string) *v3.GetResponse {
+ if len(keys) == 0 {
+ return nil
+ }
+ ops := make([]v3.Op, len(keys))
+ for i, key := range keys {
+ if resp, ok := s.rset[key]; ok {
+ return resp
+ }
+ ops[i] = v3.OpGet(key, s.getOpts...)
+ }
+ txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
+ if err != nil {
+ panic(stmError{err})
+ }
+ s.rset.add(keys, txnresp)
+ return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
+}
+
+func (s *stm) reset() {
+ s.rset = make(map[string]*v3.GetResponse)
+ s.wset = make(map[string]stmPut)
+}
+
+type stmSerializable struct {
+ stm
+ prefetch map[string]*v3.GetResponse
+}
+
+func (s *stmSerializable) Get(keys ...string) string {
+ if wv := s.wset.get(keys...); wv != nil {
+ return wv.val
+ }
+ firstRead := len(s.rset) == 0
+ for _, key := range keys {
+ if resp, ok := s.prefetch[key]; ok {
+ delete(s.prefetch, key)
+ s.rset[key] = resp
+ }
+ }
+ resp := s.stm.fetch(keys...)
+ if firstRead {
+ // txn's base revision is defined by the first read
+ s.getOpts = []v3.OpOption{
+ v3.WithRev(resp.Header.Revision),
+ v3.WithSerializable(),
+ }
+ }
+ return respToValue(resp)
+}
+
+func (s *stmSerializable) Rev(key string) int64 {
+ s.Get(key)
+ return s.stm.Rev(key)
+}
+
+func (s *stmSerializable) gets() ([]string, []v3.Op) {
+ keys := make([]string, 0, len(s.rset))
+ ops := make([]v3.Op, 0, len(s.rset))
+ for k := range s.rset {
+ keys = append(keys, k)
+ ops = append(ops, v3.OpGet(k))
+ }
+ return keys, ops
+}
+
+func (s *stmSerializable) commit() *v3.TxnResponse {
+ keys, getops := s.gets()
+ txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
+ // use Else to prefetch keys in case of conflict to save a round trip
+ txnresp, err := txn.Else(getops...).Commit()
+ if err != nil {
+ panic(stmError{err})
+ }
+ if txnresp.Succeeded {
+ return txnresp
+ }
+ // load prefetch with Else data
+ s.rset.add(keys, txnresp)
+ s.prefetch = s.rset
+ s.getOpts = nil
+ return nil
+}
+
+func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
+ if len(r.Kvs) != 0 {
+ return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
+ }
+ return v3.Compare(v3.ModRevision(k), "=", 0)
+}
+
+func respToValue(resp *v3.GetResponse) string {
+ if resp == nil || len(resp.Kvs) == 0 {
+ return ""
+ }
+ return string(resp.Kvs[0].Value)
+}
+
+// NewSTMRepeatable is deprecated.
+func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+ return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
+}
+
+// NewSTMSerializable is deprecated.
+func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+ return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
+}
+
+// NewSTMReadCommitted is deprecated.
+func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+ return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
+}
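+
+// A minimal usage sketch (illustrative; the key names are assumptions):
+// atomically move a value between two keys under the default
+// serializable-snapshot isolation, given an established *v3.Client cli.
+//
+//	_, err := NewSTM(cli, func(st STM) error {
+//		v := st.Get("src")
+//		st.Put("dst", v)
+//		st.Del("src")
+//		return nil
+//	})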
diff --git a/vendor/go.etcd.io/etcd/clientv3/ctx.go b/vendor/go.etcd.io/etcd/clientv3/ctx.go
new file mode 100644
index 000000000000..542219837bbc
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/clientv3/ctx.go
@@ -0,0 +1,64 @@
+// Copyright 2020 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "strings"
+
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ "go.etcd.io/etcd/version"
+ "google.golang.org/grpc/metadata"
+)
+
+// WithRequireLeader requires client requests to only succeed
+// when the cluster has a leader.
+func WithRequireLeader(ctx context.Context) context.Context {
+ md, ok := metadata.FromOutgoingContext(ctx)
+ if !ok { // no outgoing metadata ctx key, create one
+ md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
+ return metadata.NewOutgoingContext(ctx, md)
+ }
+	copied := md.Copy() // avoid racy updates
+ // overwrite/add 'hasleader' key/value
+ metadataSet(copied, rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
+ return metadata.NewOutgoingContext(ctx, copied)
+}
+
+// withVersion embeds the client version into the outgoing context metadata.
+func withVersion(ctx context.Context) context.Context {
+ md, ok := metadata.FromOutgoingContext(ctx)
+ if !ok { // no outgoing metadata ctx key, create one
+ md = metadata.Pairs(rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
+ return metadata.NewOutgoingContext(ctx, md)
+ }
+	copied := md.Copy() // avoid racy updates
+ // overwrite/add version key/value
+ metadataSet(copied, rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
+ return metadata.NewOutgoingContext(ctx, copied)
+}
+
+func metadataGet(md metadata.MD, k string) []string {
+ k = strings.ToLower(k)
+ return md[k]
+}
+
+func metadataSet(md metadata.MD, k string, vals ...string) {
+ if len(vals) == 0 {
+ return
+ }
+ k = strings.ToLower(k)
+ md[k] = vals
+}
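+
+// A minimal usage sketch (illustrative; the key is an assumption): given an
+// established *Client cli, the request fails fast when the cluster has no
+// leader instead of blocking on a follower.
+//
+//	ctx := WithRequireLeader(context.Background())
+//	resp, err := cli.Get(ctx, "key")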
diff --git a/vendor/go.etcd.io/etcd/clientv3/maintenance.go b/vendor/go.etcd.io/etcd/clientv3/maintenance.go
index 744455a3b36d..809b8a3b4ba4 100644
--- a/vendor/go.etcd.io/etcd/clientv3/maintenance.go
+++ b/vendor/go.etcd.io/etcd/clientv3/maintenance.go
@@ -20,6 +20,7 @@ import (
"io"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.uber.org/zap"
"google.golang.org/grpc"
)
@@ -68,6 +69,7 @@ type Maintenance interface {
}
type maintenance struct {
+ lg *zap.Logger
dial func(endpoint string) (pb.MaintenanceClient, func(), error)
remote pb.MaintenanceClient
callOpts []grpc.CallOption
@@ -75,6 +77,7 @@ type maintenance struct {
func NewMaintenance(c *Client) Maintenance {
api := &maintenance{
+ lg: c.lg,
dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
conn, err := c.Dial(endpoint)
if err != nil {
@@ -93,6 +96,7 @@ func NewMaintenance(c *Client) Maintenance {
func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
api := &maintenance{
+ lg: c.lg,
dial: func(string) (pb.MaintenanceClient, func(), error) {
return remote, func() {}, nil
},
@@ -193,23 +197,32 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
return nil, toErr(ctx, err)
}
+ m.lg.Info("opened snapshot stream; downloading")
pr, pw := io.Pipe()
go func() {
for {
resp, err := ss.Recv()
if err != nil {
+ switch err {
+ case io.EOF:
+ m.lg.Info("completed snapshot read; closing")
+ default:
+ m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err))
+ }
pw.CloseWithError(err)
return
}
- if resp == nil && err == nil {
- break
- }
+
+			// Can "resp == nil && err == nil" happen before we receive the
+			// snapshot SHA digest? No: the server sends EOF with an empty
+			// response after it sends the SHA digest at the end.
+
if _, werr := pw.Write(resp.Blob); werr != nil {
pw.CloseWithError(werr)
return
}
}
- pw.Close()
}()
return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
}
diff --git a/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
index 080490ae2929..2c266e55bec0 100644
--- a/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
+++ b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
@@ -38,6 +38,7 @@ import (
func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ ctx = withVersion(ctx)
grpcOpts, retryOpts := filterCallOptions(opts)
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
// short circuit for simplicity, and avoiding allocations.
@@ -103,6 +104,7 @@ func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOpt
func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor {
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ ctx = withVersion(ctx)
grpcOpts, retryOpts := filterCallOptions(opts)
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
// short circuit for simplicity, and avoiding allocations.
@@ -113,10 +115,9 @@ func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOp
return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
}
newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
- logger.Warn("retry stream intercept", zap.Error(err))
if err != nil {
- // TODO(mwitkow): Maybe dial and transport errors should be retriable?
- return nil, err
+ logger.Error("streamer failed to create ClientStream", zap.Error(err))
+ return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
}
retryingStreamer := &serverStreamingRetryingStream{
client: c,
@@ -185,6 +186,7 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
if !attemptRetry {
return lastErr // success or hard failure
}
+
// We start off from attempt 1, because zeroth was already made on normal SendMsg().
for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
@@ -192,12 +194,13 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
}
newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
if err != nil {
- // TODO(mwitkow): Maybe dial and transport errors should be retriable?
- return err
+ s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err))
+ return err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
}
s.setStream(newStream)
+
+ s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr))
attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
- //fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr)
if !attemptRetry {
return lastErr
}
diff --git a/vendor/go.etcd.io/etcd/embed/config.go b/vendor/go.etcd.io/etcd/embed/config.go
new file mode 100644
index 000000000000..2f64d927f2af
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/embed/config.go
@@ -0,0 +1,915 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api/v3compactor"
+ "go.etcd.io/etcd/pkg/flags"
+ "go.etcd.io/etcd/pkg/logutil"
+ "go.etcd.io/etcd/pkg/netutil"
+ "go.etcd.io/etcd/pkg/srv"
+ "go.etcd.io/etcd/pkg/tlsutil"
+ "go.etcd.io/etcd/pkg/transport"
+ "go.etcd.io/etcd/pkg/types"
+
+ bolt "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "golang.org/x/crypto/bcrypt"
+ "google.golang.org/grpc"
+ "sigs.k8s.io/yaml"
+)
+
+const (
+ ClusterStateFlagNew = "new"
+ ClusterStateFlagExisting = "existing"
+
+ DefaultName = "default"
+ DefaultMaxSnapshots = 5
+ DefaultMaxWALs = 5
+ DefaultMaxTxnOps = uint(128)
+ DefaultMaxRequestBytes = 1.5 * 1024 * 1024
+ DefaultGRPCKeepAliveMinTime = 5 * time.Second
+ DefaultGRPCKeepAliveInterval = 2 * time.Hour
+ DefaultGRPCKeepAliveTimeout = 20 * time.Second
+
+ DefaultListenPeerURLs = "http://localhost:2380"
+ DefaultListenClientURLs = "http://localhost:2379"
+
+ DefaultLogOutput = "default"
+ JournalLogOutput = "systemd/journal"
+ StdErrLogOutput = "stderr"
+ StdOutLogOutput = "stdout"
+
+ // DefaultStrictReconfigCheck is the default value for "--strict-reconfig-check" flag.
+ // It's enabled by default.
+ DefaultStrictReconfigCheck = true
+ // DefaultEnableV2 is the default value for "--enable-v2" flag.
+ // v2 API is disabled by default.
+ DefaultEnableV2 = false
+
+ // maxElectionMs specifies the maximum value of election timeout.
+ // More details are listed in ../Documentation/tuning.md#time-parameters.
+ maxElectionMs = 50000
+ // backend freelist map type
+ freelistMapType = "map"
+)
+
+var (
+ ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " +
+ "Choose one of \"initial-cluster\", \"discovery\" or \"discovery-srv\"")
+ ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly")
+
+ DefaultInitialAdvertisePeerURLs = "http://localhost:2380"
+ DefaultAdvertiseClientURLs = "http://localhost:2379"
+
+ defaultHostname string
+ defaultHostStatus error
+)
+
+var (
+ // CompactorModePeriodic is periodic compaction mode
+ // for "Config.AutoCompactionMode" field.
+ // If "AutoCompactionMode" is CompactorModePeriodic and
+	// "AutoCompactionRetention" is "1h", it automatically compacts
+	// storage every hour.
+ CompactorModePeriodic = v3compactor.ModePeriodic
+
+ // CompactorModeRevision is revision-based compaction mode
+ // for "Config.AutoCompactionMode" field.
+ // If "AutoCompactionMode" is CompactorModeRevision and
+	// "AutoCompactionRetention" is "1000", it compacts the log at
+	// revision 5000 when the current revision is 6000.
+	// This runs every 5 minutes if enough logs have accumulated.
+ CompactorModeRevision = v3compactor.ModeRevision
+)
+
+func init() {
+ defaultHostname, defaultHostStatus = netutil.GetDefaultHost()
+}
+
+// Config holds the arguments for configuring an etcd server.
+type Config struct {
+ Name string `json:"name"`
+ Dir string `json:"data-dir"`
+ WalDir string `json:"wal-dir"`
+
+ SnapshotCount uint64 `json:"snapshot-count"`
+
+ // SnapshotCatchUpEntries is the number of entries for a slow follower
+	// to catch up on after the raft storage entries are compacted.
+	// We expect the follower to have millisecond-level latency with the leader.
+	// The max throughput is around 10K. Keeping 5K entries is enough to help
+	// a follower catch up.
+	// WARNING: only change this for tests.
+	// Always use "DefaultSnapshotCatchUpEntries".
+ SnapshotCatchUpEntries uint64
+
+ MaxSnapFiles uint `json:"max-snapshots"`
+ MaxWalFiles uint `json:"max-wals"`
+
+ // TickMs is the number of milliseconds between heartbeat ticks.
+ // TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1).
+ // make ticks a cluster wide configuration.
+ TickMs uint `json:"heartbeat-interval"`
+ ElectionMs uint `json:"election-timeout"`
+
+	// If InitialElectionTickAdvance is true, the local member fast-forwards
+	// election ticks to speed up the "initial" leader election trigger. This
+	// benefits the case of larger election ticks. For instance, a cross-
+	// datacenter deployment may require a longer election timeout of 10 seconds.
+	// If true, the local node does not need to wait up to 10 seconds. Instead,
+	// it forwards its election ticks to 8 seconds, leaving only 2 seconds
+	// before the leader election.
+ //
+ // Major assumptions are that:
+ // - cluster has no active leader thus advancing ticks enables faster
+ // leader election, or
+ // - cluster already has an established leader, and rejoining follower
+ // is likely to receive heartbeats from the leader after tick advance
+ // and before election timeout.
+ //
+	// However, when the network from the leader to a rejoining follower is
+	// congested, and the follower does not receive a leader heartbeat within
+	// the remaining election ticks, a disruptive election has to happen,
+	// thus affecting cluster availability.
+ //
+	// Disabling this would slow down the initial bootstrap process for
+	// cross-datacenter deployments. Make your own tradeoff by configuring
+	// --initial-election-tick-advance at the cost of slow initial bootstrap.
+ //
+ // If single-node, it advances ticks regardless.
+ //
+ // See https://github.com/etcd-io/etcd/issues/9333 for more detail.
+ InitialElectionTickAdvance bool `json:"initial-election-tick-advance"`
+
+	// BackendBatchInterval is the maximum time before committing the backend transaction.
+	BackendBatchInterval time.Duration `json:"backend-batch-interval"`
+	// BackendBatchLimit is the maximum number of operations before committing the backend transaction.
+ BackendBatchLimit int `json:"backend-batch-limit"`
+ QuotaBackendBytes int64 `json:"quota-backend-bytes"`
+ MaxTxnOps uint `json:"max-txn-ops"`
+ MaxRequestBytes uint `json:"max-request-bytes"`
+
+ LPUrls, LCUrls []url.URL
+ APUrls, ACUrls []url.URL
+ ClientTLSInfo transport.TLSInfo
+ ClientAutoTLS bool
+ PeerTLSInfo transport.TLSInfo
+ PeerAutoTLS bool
+
+ // CipherSuites is a list of supported TLS cipher suites between
+ // client/server and peers. If empty, Go auto-populates the list.
+ // Note that cipher suites are prioritized in the given order.
+ CipherSuites []string `json:"cipher-suites"`
+
+ ClusterState string `json:"initial-cluster-state"`
+ DNSCluster string `json:"discovery-srv"`
+ DNSClusterServiceName string `json:"discovery-srv-name"`
+ Dproxy string `json:"discovery-proxy"`
+ Durl string `json:"discovery"`
+ InitialCluster string `json:"initial-cluster"`
+ InitialClusterToken string `json:"initial-cluster-token"`
+ StrictReconfigCheck bool `json:"strict-reconfig-check"`
+ EnableV2 bool `json:"enable-v2"`
+
+ // AutoCompactionMode is either 'periodic' or 'revision'.
+ AutoCompactionMode string `json:"auto-compaction-mode"`
+	// AutoCompactionRetention is either a duration string with a time unit
+	// (e.g. '5m' for 5 minutes), or a revision unit (e.g. '5000').
+	// If no time unit is provided and the compaction mode is 'periodic',
+	// the unit defaults to hours. For example, '5' translates into 5 hours.
+ AutoCompactionRetention string `json:"auto-compaction-retention"`
+
+ // GRPCKeepAliveMinTime is the minimum interval that a client should
+	// wait before pinging the server. When a client pings "too fast", the
+	// server sends a goaway and closes the connection (errors: too_many_pings,
+	// http2.ErrCodeEnhanceYourCalm). When too slow, nothing happens.
+	// The server expects client pings only when there are active streams
+	// (PermitWithoutStream is set to false).
+ GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"`
+ // GRPCKeepAliveInterval is the frequency of server-to-client ping
+ // to check if a connection is alive. Close a non-responsive connection
+ // after an additional duration of Timeout. 0 to disable.
+ GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"`
+ // GRPCKeepAliveTimeout is the additional duration of wait
+ // before closing a non-responsive connection. 0 to disable.
+ GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"`
+
+ // PreVote is true to enable Raft Pre-Vote.
+ // If enabled, Raft runs an additional election phase
+ // to check whether it would get enough votes to win
+ // an election, thus minimizing disruptions.
+ // TODO: enable by default in 3.5.
+ PreVote bool `json:"pre-vote"`
+
+ CORS map[string]struct{}
+
+ // HostWhitelist lists acceptable hostnames from HTTP client requests.
+ // Client origin policy protects against "DNS Rebinding" attacks
+ // to insecure etcd servers. That is, any website can simply create
+ // an authorized DNS name, and direct DNS to "localhost" (or any
+	// other address). Then, all HTTP endpoints of an etcd server listening
+	// on "localhost" become accessible, and thus vulnerable to DNS rebinding
+	// attacks. See "CVE-2018-5702" for more detail.
+ //
+ // 1. If client connection is secure via HTTPS, allow any hostnames.
+ // 2. If client connection is not secure and "HostWhitelist" is not empty,
+ // only allow HTTP requests whose Host field is listed in whitelist.
+ //
+ // Note that the client origin policy is enforced whether authentication
+ // is enabled or not, for tighter controls.
+ //
+ // By default, "HostWhitelist" is "*", which allows any hostnames.
+ // Note that when specifying hostnames, loopback addresses are not added
+	// automatically. To allow loopback interfaces, leave it empty or set it to "*",
+	// or add them to the whitelist manually (e.g. "localhost", "127.0.0.1", etc.).
+ //
+ // CVE-2018-5702 reference:
+ // - https://bugs.chromium.org/p/project-zero/issues/detail?id=1447#c2
+ // - https://github.com/transmission/transmission/pull/468
+ // - https://github.com/etcd-io/etcd/issues/9353
+ HostWhitelist map[string]struct{}
+
+	// UserHandlers is for registering users' handlers and is only used for
+	// embedding etcd into other applications.
+	// The map key is the route path for the handler, and
+	// you must ensure it does not conflict with etcd's own routes.
+ UserHandlers map[string]http.Handler `json:"-"`
+ // ServiceRegister is for registering users' gRPC services. A simple usage example:
+ // cfg := embed.NewConfig()
+	//	cfg.ServiceRegister = func(s *grpc.Server) {
+ // pb.RegisterFooServer(s, &fooServer{})
+ // pb.RegisterBarServer(s, &barServer{})
+ // }
+ // embed.StartEtcd(cfg)
+ ServiceRegister func(*grpc.Server) `json:"-"`
+
+ AuthToken string `json:"auth-token"`
+ BcryptCost uint `json:"bcrypt-cost"`
+
+ ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"`
+ ExperimentalCorruptCheckTime time.Duration `json:"experimental-corrupt-check-time"`
+ ExperimentalEnableV2V3 string `json:"experimental-enable-v2v3"`
+ // ExperimentalBackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types).
+ ExperimentalBackendFreelistType string `json:"experimental-backend-bbolt-freelist-type"`
+ // ExperimentalEnableLeaseCheckpoint enables primary lessor to persist lease remainingTTL to prevent indefinite auto-renewal of long lived leases.
+ ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"`
+ ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"`
+
+ // ForceNewCluster starts a new cluster even if previously started; unsafe.
+ ForceNewCluster bool `json:"force-new-cluster"`
+
+ EnablePprof bool `json:"enable-pprof"`
+ Metrics string `json:"metrics"`
+ ListenMetricsUrls []url.URL
+ ListenMetricsUrlsJSON string `json:"listen-metrics-urls"`
+
+ // Logger is logger options: "zap", "capnslog".
+ // WARN: "capnslog" is being deprecated in v3.5.
+ Logger string `json:"logger"`
+ // LogLevel configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
+ LogLevel string `json:"log-level"`
+ // LogOutputs is either:
+ // - "default" as os.Stderr,
+ // - "stderr" as os.Stderr,
+ // - "stdout" as os.Stdout,
+ // - file path to append server logs to.
+ // It can be multiple when "Logger" is zap.
+ LogOutputs []string `json:"log-outputs"`
+
+ // ZapLoggerBuilder is used to build the zap logger.
+ ZapLoggerBuilder func(*Config) error
+
+ // logger logs server-side operations. The default is nil,
+ // and "setupLogging" must be called before starting server.
+ // Do not set logger directly.
+ loggerMu *sync.RWMutex
+ logger *zap.Logger
+
+ // loggerConfig is server logger configuration for Raft logger.
+ // Must be either: "loggerConfig != nil" or "loggerCore != nil && loggerWriteSyncer != nil".
+ loggerConfig *zap.Config
+ // loggerCore is "zapcore.Core" for raft logger.
+ // Must be either: "loggerConfig != nil" or "loggerCore != nil && loggerWriteSyncer != nil".
+ loggerCore zapcore.Core
+ loggerWriteSyncer zapcore.WriteSyncer
+
+ // EnableGRPCGateway is false to disable grpc gateway.
+ EnableGRPCGateway bool `json:"enable-grpc-gateway"`
+
+ // TO BE DEPRECATED
+
+ // DeprecatedLogOutput is to be deprecated in v3.5.
+ // Just here for safe migration in v3.4.
+ DeprecatedLogOutput []string `json:"log-output"`
+ // Debug is true, to enable debug level logging.
+ // WARNING: to be deprecated in 3.5. Use "--log-level=debug" instead.
+ Debug bool `json:"debug"`
+ // LogPkgLevels is being deprecated in v3.5.
+ // Only valid if "logger" option is "capnslog".
+ // WARN: DO NOT USE THIS!
+ LogPkgLevels string `json:"log-package-levels"`
+}
+
+// configYAML holds the config suitable for yaml parsing
+type configYAML struct {
+ Config
+ configJSON
+}
+
+// configJSON has file options that are translated into Config options
+type configJSON struct {
+ LPUrlsJSON string `json:"listen-peer-urls"`
+ LCUrlsJSON string `json:"listen-client-urls"`
+ APUrlsJSON string `json:"initial-advertise-peer-urls"`
+ ACUrlsJSON string `json:"advertise-client-urls"`
+
+ CORSJSON string `json:"cors"`
+ HostWhitelistJSON string `json:"host-whitelist"`
+
+ ClientSecurityJSON securityConfig `json:"client-transport-security"`
+ PeerSecurityJSON securityConfig `json:"peer-transport-security"`
+}
+
+type securityConfig struct {
+ CertFile string `json:"cert-file"`
+ KeyFile string `json:"key-file"`
+ CertAuth bool `json:"client-cert-auth"`
+ TrustedCAFile string `json:"trusted-ca-file"`
+ AutoTLS bool `json:"auto-tls"`
+}
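+
+// A minimal YAML config file understood by ConfigFromFile might look like
+// this (illustrative values):
+//
+//	name: infra0
+//	data-dir: /var/lib/etcd
+//	listen-client-urls: http://0.0.0.0:2379
+//	advertise-client-urls: http://10.0.0.1:2379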
+
+// NewConfig creates a new Config populated with default values.
+func NewConfig() *Config {
+ lpurl, _ := url.Parse(DefaultListenPeerURLs)
+ apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs)
+ lcurl, _ := url.Parse(DefaultListenClientURLs)
+ acurl, _ := url.Parse(DefaultAdvertiseClientURLs)
+ cfg := &Config{
+ MaxSnapFiles: DefaultMaxSnapshots,
+ MaxWalFiles: DefaultMaxWALs,
+
+ Name: DefaultName,
+
+ SnapshotCount: etcdserver.DefaultSnapshotCount,
+ SnapshotCatchUpEntries: etcdserver.DefaultSnapshotCatchUpEntries,
+
+ MaxTxnOps: DefaultMaxTxnOps,
+ MaxRequestBytes: DefaultMaxRequestBytes,
+
+ GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime,
+ GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval,
+ GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout,
+
+ TickMs: 100,
+ ElectionMs: 1000,
+ InitialElectionTickAdvance: true,
+
+ LPUrls: []url.URL{*lpurl},
+ LCUrls: []url.URL{*lcurl},
+ APUrls: []url.URL{*apurl},
+ ACUrls: []url.URL{*acurl},
+
+ ClusterState: ClusterStateFlagNew,
+ InitialClusterToken: "etcd-cluster",
+
+ StrictReconfigCheck: DefaultStrictReconfigCheck,
+ Metrics: "basic",
+ EnableV2: DefaultEnableV2,
+
+ CORS: map[string]struct{}{"*": {}},
+ HostWhitelist: map[string]struct{}{"*": {}},
+
+ AuthToken: "simple",
+ BcryptCost: uint(bcrypt.DefaultCost),
+
+ PreVote: false, // TODO: enable by default in v3.5
+
+ loggerMu: new(sync.RWMutex),
+ logger: nil,
+ Logger: "capnslog",
+ DeprecatedLogOutput: []string{DefaultLogOutput},
+ LogOutputs: []string{DefaultLogOutput},
+ Debug: false,
+ LogLevel: logutil.DefaultLogLevel,
+ LogPkgLevels: "",
+ }
+ cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+ return cfg
+}
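+
+// A minimal embedding sketch (illustrative; the data directory is an
+// assumption): start a single member with the defaults above.
+//
+//	cfg := NewConfig()
+//	cfg.Dir = "/tmp/etcd-data"
+//	e, err := StartEtcd(cfg)
+//	if err == nil {
+//		defer e.Close()
+//	}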
+
+func logTLSHandshakeFailure(conn *tls.Conn, err error) {
+ state := conn.ConnectionState()
+ remoteAddr := conn.RemoteAddr().String()
+ serverName := state.ServerName
+ if len(state.PeerCertificates) > 0 {
+ cert := state.PeerCertificates[0]
+ ips, dns := cert.IPAddresses, cert.DNSNames
+ plog.Infof("rejected connection from %q (error %q, ServerName %q, IPAddresses %q, DNSNames %q)", remoteAddr, err.Error(), serverName, ips, dns)
+ } else {
+ plog.Infof("rejected connection from %q (error %q, ServerName %q)", remoteAddr, err.Error(), serverName)
+ }
+}
+
+func ConfigFromFile(path string) (*Config, error) {
+ cfg := &configYAML{Config: *NewConfig()}
+ if err := cfg.configFromFile(path); err != nil {
+ return nil, err
+ }
+ return &cfg.Config, nil
+}
+
+func (cfg *configYAML) configFromFile(path string) error {
+ b, err := ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ defaultInitialCluster := cfg.InitialCluster
+
+ err = yaml.Unmarshal(b, cfg)
+ if err != nil {
+ return err
+ }
+
+ if cfg.LPUrlsJSON != "" {
+ u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up listen-peer-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.LPUrls = []url.URL(u)
+ }
+
+ if cfg.LCUrlsJSON != "" {
+ u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.LCUrls = []url.URL(u)
+ }
+
+ if cfg.APUrlsJSON != "" {
+ u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up initial-advertise-peer-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.APUrls = []url.URL(u)
+ }
+
+ if cfg.ACUrlsJSON != "" {
+ u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ","))
+ if err != nil {
+			fmt.Fprintf(os.Stderr, "unexpected error setting up advertise-client-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.ACUrls = []url.URL(u)
+ }
+
+ if cfg.ListenMetricsUrlsJSON != "" {
+ u, err := types.NewURLs(strings.Split(cfg.ListenMetricsUrlsJSON, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up listen-metrics-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.ListenMetricsUrls = []url.URL(u)
+ }
+
+ if cfg.CORSJSON != "" {
+ uv := flags.NewUniqueURLsWithExceptions(cfg.CORSJSON, "*")
+ cfg.CORS = uv.Values
+ }
+
+ if cfg.HostWhitelistJSON != "" {
+ uv := flags.NewUniqueStringsValue(cfg.HostWhitelistJSON)
+ cfg.HostWhitelist = uv.Values
+ }
+
+ // If a discovery flag is set, clear default initial cluster set by InitialClusterFromName
+ if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster {
+ cfg.InitialCluster = ""
+ }
+ if cfg.ClusterState == "" {
+ cfg.ClusterState = ClusterStateFlagNew
+ }
+
+ copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) {
+ tls.CertFile = ysc.CertFile
+ tls.KeyFile = ysc.KeyFile
+ tls.ClientCertAuth = ysc.CertAuth
+ tls.TrustedCAFile = ysc.TrustedCAFile
+ }
+ copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON)
+ copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON)
+ cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS
+ cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS
+
+ return cfg.Validate()
+}
+
+func updateCipherSuites(tls *transport.TLSInfo, ss []string) error {
+ if len(tls.CipherSuites) > 0 && len(ss) > 0 {
+ return fmt.Errorf("TLSInfo.CipherSuites is already specified (given %v)", ss)
+ }
+ if len(ss) > 0 {
+ cs := make([]uint16, len(ss))
+ for i, s := range ss {
+ var ok bool
+ cs[i], ok = tlsutil.GetCipherSuite(s)
+ if !ok {
+ return fmt.Errorf("unexpected TLS cipher suite %q", s)
+ }
+ }
+ tls.CipherSuites = cs
+ }
+ return nil
+}
+
+// Validate ensures that '*embed.Config' fields are properly configured.
+func (cfg *Config) Validate() error {
+ if err := cfg.setupLogging(); err != nil {
+ return err
+ }
+ if err := checkBindURLs(cfg.LPUrls); err != nil {
+ return err
+ }
+ if err := checkBindURLs(cfg.LCUrls); err != nil {
+ return err
+ }
+ if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil {
+ return err
+ }
+ if err := checkHostURLs(cfg.APUrls); err != nil {
+ addrs := cfg.getAPURLs()
+ return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err)
+ }
+ if err := checkHostURLs(cfg.ACUrls); err != nil {
+ addrs := cfg.getACURLs()
+ return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%v)`, strings.Join(addrs, ","), err)
+ }
+ // Check if conflicting flags are passed.
+ nSet := 0
+ for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} {
+ if v {
+ nSet++
+ }
+ }
+
+ if cfg.ClusterState != ClusterStateFlagNew && cfg.ClusterState != ClusterStateFlagExisting {
+ return fmt.Errorf("unexpected clusterState %q", cfg.ClusterState)
+ }
+
+ if nSet > 1 {
+ return ErrConflictBootstrapFlags
+ }
+
+ if cfg.TickMs <= 0 {
+ return fmt.Errorf("--heartbeat-interval must be >0 (set to %dms)", cfg.TickMs)
+ }
+ if cfg.ElectionMs <= 0 {
+ return fmt.Errorf("--election-timeout must be >0 (set to %dms)", cfg.ElectionMs)
+ }
+ if 5*cfg.TickMs > cfg.ElectionMs {
+		return fmt.Errorf("--election-timeout[%vms] should be at least 5 times --heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs)
+ }
+ if cfg.ElectionMs > maxElectionMs {
+		return fmt.Errorf("--election-timeout[%vms] is too long, and should be set to less than %vms", cfg.ElectionMs, maxElectionMs)
+ }
+
+ // check this last since proxying in etcdmain may make this OK
+ if cfg.LCUrls != nil && cfg.ACUrls == nil {
+ return ErrUnsetAdvertiseClientURLsFlag
+ }
+
+ switch cfg.AutoCompactionMode {
+ case "":
+ case CompactorModeRevision, CompactorModePeriodic:
+ default:
+ return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode)
+ }
+
+ return nil
+}
+
+// PeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery.
+func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) {
+ token = cfg.InitialClusterToken
+ switch {
+ case cfg.Durl != "":
+ urlsmap = types.URLsMap{}
+ // If using discovery, generate a temporary cluster based on
+ // self's advertised peer URLs
+ urlsmap[cfg.Name] = cfg.APUrls
+ token = cfg.Durl
+
+ case cfg.DNSCluster != "":
+ clusterStrs, cerr := cfg.GetDNSClusterNames()
+ lg := cfg.logger
+ if cerr != nil {
+ if lg != nil {
+ lg.Warn("failed to resolve during SRV discovery", zap.Error(cerr))
+ } else {
+ plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr)
+ }
+ return nil, "", cerr
+ }
+ for _, s := range clusterStrs {
+ if lg != nil {
+ lg.Info("got bootstrap from DNS for etcd-server", zap.String("node", s))
+ } else {
+ plog.Noticef("got bootstrap from DNS for etcd-server at %s", s)
+ }
+ }
+ clusterStr := strings.Join(clusterStrs, ",")
+ if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.TrustedCAFile == "" {
+ cfg.PeerTLSInfo.ServerName = cfg.DNSCluster
+ }
+ urlsmap, err = types.NewURLsMap(clusterStr)
+		// Only an etcd member must belong to the discovered cluster;
+		// a proxy does not need to.
+ if which == "etcd" {
+ if _, ok := urlsmap[cfg.Name]; !ok {
+ return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name)
+ }
+ }
+
+ default:
+		// We're statically configured, and the cluster has been set appropriately.
+ urlsmap, err = types.NewURLsMap(cfg.InitialCluster)
+ }
+ return urlsmap, token, err
+}
+
+// GetDNSClusterNames uses DNS SRV records to get a list of initial nodes for cluster bootstrapping.
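+// For example (illustrative), with "discovery-srv" set to "example.com", it
+// queries the SRV records "_etcd-server-ssl._tcp.example.com" and
+// "_etcd-server._tcp.example.com".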
+func (cfg *Config) GetDNSClusterNames() ([]string, error) {
+ var (
+ clusterStrs []string
+ cerr error
+ serviceNameSuffix string
+ )
+ if cfg.DNSClusterServiceName != "" {
+ serviceNameSuffix = "-" + cfg.DNSClusterServiceName
+ }
+
+ lg := cfg.GetLogger()
+
+ // Use both etcd-server-ssl and etcd-server for discovery.
+ // Combine the results if both are available.
+ clusterStrs, cerr = srv.GetCluster("https", "etcd-server-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls)
+ if cerr != nil {
+ clusterStrs = make([]string, 0)
+ }
+ if lg != nil {
+ lg.Info(
+ "get cluster for etcd-server-ssl SRV",
+ zap.String("service-scheme", "https"),
+ zap.String("service-name", "etcd-server-ssl"+serviceNameSuffix),
+ zap.String("server-name", cfg.Name),
+ zap.String("discovery-srv", cfg.DNSCluster),
+ zap.Strings("advertise-peer-urls", cfg.getAPURLs()),
+ zap.Strings("found-cluster", clusterStrs),
+ zap.Error(cerr),
+ )
+ }
+
+ defaultHTTPClusterStrs, httpCerr := srv.GetCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.APUrls)
+	if httpCerr == nil {
+ clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...)
+ }
+ if lg != nil {
+ lg.Info(
+ "get cluster for etcd-server SRV",
+ zap.String("service-scheme", "http"),
+ zap.String("service-name", "etcd-server"+serviceNameSuffix),
+ zap.String("server-name", cfg.Name),
+ zap.String("discovery-srv", cfg.DNSCluster),
+ zap.Strings("advertise-peer-urls", cfg.getAPURLs()),
+ zap.Strings("found-cluster", clusterStrs),
+ zap.Error(httpCerr),
+ )
+ }
+
+ return clusterStrs, cerr
+}
+
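+// InitialClusterFromName derives the "initial-cluster" string from the member
+// name and the advertised peer URLs. For example (illustrative values), a
+// member named "infra0" advertising "http://10.0.0.1:2380" yields
+// "infra0=http://10.0.0.1:2380".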
+func (cfg Config) InitialClusterFromName(name string) (ret string) {
+ if len(cfg.APUrls) == 0 {
+ return ""
+ }
+ n := name
+ if name == "" {
+ n = DefaultName
+ }
+ for i := range cfg.APUrls {
+ ret = ret + "," + n + "=" + cfg.APUrls[i].String()
+ }
+ return ret[1:]
+}
+
+func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew }
+func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) }
+
+func (cfg Config) defaultPeerHost() bool {
+ return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs
+}
+
+func (cfg Config) defaultClientHost() bool {
+ return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs
+}
+
+func (cfg *Config) ClientSelfCert() (err error) {
+ if !cfg.ClientAutoTLS {
+ return nil
+ }
+ if !cfg.ClientTLSInfo.Empty() {
+ if cfg.logger != nil {
+ cfg.logger.Warn("ignoring client auto TLS since certs given")
+ } else {
+ plog.Warningf("ignoring client auto TLS since certs given")
+ }
+ return nil
+ }
+ chosts := make([]string, len(cfg.LCUrls))
+ for i, u := range cfg.LCUrls {
+ chosts[i] = u.Host
+ }
+ cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts)
+ if err != nil {
+ return err
+ }
+ return updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites)
+}
+
+func (cfg *Config) PeerSelfCert() (err error) {
+ if !cfg.PeerAutoTLS {
+ return nil
+ }
+ if !cfg.PeerTLSInfo.Empty() {
+ if cfg.logger != nil {
+ cfg.logger.Warn("ignoring peer auto TLS since certs given")
+ } else {
+ plog.Warningf("ignoring peer auto TLS since certs given")
+ }
+ return nil
+ }
+ phosts := make([]string, len(cfg.LPUrls))
+ for i, u := range cfg.LPUrls {
+ phosts[i] = u.Host
+ }
+ cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts)
+ if err != nil {
+ return err
+ }
+ return updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites)
+}
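+
+// For example (illustrative): with PeerAutoTLS and Dir "default.etcd", a
+// self-signed certificate and key are generated under
+// default.etcd/fixtures/peer/ with the listen-peer hosts as SANs.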
+
+// UpdateDefaultClusterFromName updates the cluster advertise URLs with the
+// machine's default host, if available, when the advertise URLs are the
+// default values (localhost:2379, localhost:2380) AND the listen URL is 0.0.0.0.
+// e.g. with advertise peer URL localhost:2380 and listen peer URL 0.0.0.0:2380,
+// the advertise peer host is replaced with the machine's default host while
+// the listen URL's port is kept.
+// Users can work around this by explicitly setting a URL with 127.0.0.1.
+// It returns the default hostname, if used, and the error, if any, from getting the machine's default host.
+// TODO: check whether fields are set instead of whether fields have default value
+func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) {
+ if defaultHostname == "" || defaultHostStatus != nil {
+ // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
+ if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
+ cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+ }
+ return "", defaultHostStatus
+ }
+
+ used := false
+ pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port()
+ if cfg.defaultPeerHost() && pip == "0.0.0.0" {
+ cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
+ used = true
+ }
+ // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
+ if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
+ cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+ }
+
+ cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port()
+ if cfg.defaultClientHost() && cip == "0.0.0.0" {
+ cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
+ used = true
+ }
+ dhost := defaultHostname
+ if !used {
+ dhost = ""
+ }
+ return dhost, defaultHostStatus
+}
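+
+// Illustrative example (not upstream): given
+//
+// cfg.LPUrls = [http://0.0.0.0:2380] // listen on all interfaces
+// cfg.APUrls = [http://localhost:2380] // default advertise URL
+//
+// and a machine default host of 10.0.1.10, the advertise peer URL is
+// rewritten to http://10.0.1.10:2380 while the 0.0.0.0 listen URL is
+// left untouched.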
+
+// checkBindURLs returns an error if any URL uses a domain name.
+func checkBindURLs(urls []url.URL) error {
+ for _, url := range urls {
+ if url.Scheme == "unix" || url.Scheme == "unixs" {
+ continue
+ }
+ host, _, err := net.SplitHostPort(url.Host)
+ if err != nil {
+ return err
+ }
+ if host == "localhost" {
+ // special case for local address
+ // TODO: support /etc/hosts ?
+ continue
+ }
+ if net.ParseIP(host) == nil {
+ return fmt.Errorf("expected IP in URL for binding (%s)", url.String())
+ }
+ }
+ return nil
+}
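+
+// For example (illustrative): https://etcd-1.example.com:2380 is rejected
+// as a bind URL because it names a host, while https://10.0.1.10:2380,
+// https://0.0.0.0:2380, and http://localhost:2380 are accepted.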
+
+func checkHostURLs(urls []url.URL) error {
+ for _, url := range urls {
+ host, _, err := net.SplitHostPort(url.Host)
+ if err != nil {
+ return err
+ }
+ if host == "" {
+ return fmt.Errorf("unexpected empty host (%s)", url.String())
+ }
+ }
+ return nil
+}
+
+func (cfg *Config) getAPURLs() (ss []string) {
+ ss = make([]string, len(cfg.APUrls))
+ for i := range cfg.APUrls {
+ ss[i] = cfg.APUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getLPURLs() (ss []string) {
+ ss = make([]string, len(cfg.LPUrls))
+ for i := range cfg.LPUrls {
+ ss[i] = cfg.LPUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getACURLs() (ss []string) {
+ ss = make([]string, len(cfg.ACUrls))
+ for i := range cfg.ACUrls {
+ ss[i] = cfg.ACUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getLCURLs() (ss []string) {
+ ss = make([]string, len(cfg.LCUrls))
+ for i := range cfg.LCUrls {
+ ss[i] = cfg.LCUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getMetricsURLs() (ss []string) {
+ ss = make([]string, len(cfg.ListenMetricsUrls))
+ for i := range cfg.ListenMetricsUrls {
+ ss[i] = cfg.ListenMetricsUrls[i].String()
+ }
+ return ss
+}
+
+func parseBackendFreelistType(freelistType string) bolt.FreelistType {
+ if freelistType == freelistMapType {
+ return bolt.FreelistMapType
+ }
+
+ return bolt.FreelistArrayType
+}
diff --git a/vendor/go.etcd.io/etcd/embed/config_logging.go b/vendor/go.etcd.io/etcd/embed/config_logging.go
new file mode 100644
index 000000000000..e42103cb18c2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/embed/config_logging.go
@@ -0,0 +1,312 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "reflect"
+ "sync"
+
+ "go.etcd.io/etcd/pkg/logutil"
+
+ "github.com/coreos/pkg/capnslog"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/grpclog"
+)
+
+// GetLogger returns the logger.
+func (cfg Config) GetLogger() *zap.Logger {
+ cfg.loggerMu.RLock()
+ l := cfg.logger
+ cfg.loggerMu.RUnlock()
+ return l
+}
+
+// grpcLogOnce is a pointer so tests can swap in a fresh sync.Once
+var grpcLogOnce = new(sync.Once)
+
+// setupLogging initializes etcd logging.
+// Must be called after flag parsing or finishing configuring embed.Config.
+func (cfg *Config) setupLogging() error {
+ // handle "DeprecatedLogOutput" in v3.4
+ // TODO: remove "DeprecatedLogOutput" in v3.5
+ len1 := len(cfg.DeprecatedLogOutput)
+ len2 := len(cfg.LogOutputs)
+ if len1 != len2 {
+ switch {
+ case len1 > len2: // deprecated "--log-output" flag is in use
+ fmt.Fprintln(os.Stderr, "'--log-output' flag has been deprecated! Please use '--log-outputs'!")
+ cfg.LogOutputs = cfg.DeprecatedLogOutput
+ case len1 < len2: // "--log-outputs" flag has been set with multiple writers
+ cfg.DeprecatedLogOutput = []string{}
+ }
+ } else {
+ if len1 > 1 {
+ return errors.New("both '--log-output' and '--log-outputs' are set; only set '--log-outputs'")
+ }
+ if len1 < 1 {
+ return errors.New("either '--log-output' or '--log-outputs' flag must be set")
+ }
+ if reflect.DeepEqual(cfg.DeprecatedLogOutput, cfg.LogOutputs) && cfg.DeprecatedLogOutput[0] != DefaultLogOutput {
+ return fmt.Errorf("'--log-output=%q' and '--log-outputs=%q' are incompatible; only set --log-outputs", cfg.DeprecatedLogOutput, cfg.LogOutputs)
+ }
+ if !reflect.DeepEqual(cfg.DeprecatedLogOutput, []string{DefaultLogOutput}) {
+ fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--log-output' flag is set to %q\n", cfg.DeprecatedLogOutput)
+ fmt.Fprintln(os.Stderr, "Please use '--log-outputs' flag")
+ }
+ }
+
+ // TODO: remove after deprecating log related flags in v3.5
+ if cfg.Debug {
+ fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--debug' flag is set to %v (use '--log-level=debug' instead\n", cfg.Debug)
+ }
+ if cfg.Debug && cfg.LogLevel != "debug" {
+ fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--debug' flag is set to %v with inconsistent '--log-level=%s' flag\n", cfg.Debug, cfg.LogLevel)
+ }
+ if cfg.Logger == "capnslog" {
+ fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--logger=%s' flag is set; use '--logger=zap' flag instead\n", cfg.Logger)
+ }
+ if cfg.LogPkgLevels != "" {
+ fmt.Fprintf(os.Stderr, "[WARNING] Deprecated '--log-package-levels=%s' flag is set; use '--logger=zap' flag instead\n", cfg.LogPkgLevels)
+ }
+
+ switch cfg.Logger {
+ case "capnslog": // TODO: deprecate this in v3.5
+ cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure
+ cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure
+
+ if cfg.Debug {
+ capnslog.SetGlobalLogLevel(capnslog.DEBUG)
+ grpc.EnableTracing = true
+ // enable info, warning, error
+ grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
+ } else {
+ capnslog.SetGlobalLogLevel(logutil.ConvertToCapnslogLogLevel(cfg.LogLevel))
+ // only discard info
+ grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
+ }
+
+ // TODO: deprecate with "capnslog"
+ if cfg.LogPkgLevels != "" {
+ repoLog := capnslog.MustRepoLogger("go.etcd.io/etcd")
+ settings, err := repoLog.ParseLogLevelConfig(cfg.LogPkgLevels)
+ if err != nil {
+ plog.Warningf("couldn't parse log level string: %s, continuing with default levels", err.Error())
+ return nil
+ }
+ repoLog.SetLogLevel(settings)
+ }
+
+ if len(cfg.LogOutputs) != 1 {
+ return fmt.Errorf("--logger=capnslog supports only 1 value in '--log-outputs', got %q", cfg.LogOutputs)
+ }
+ // capnslog initially calls SetFormatter(NewDefaultFormatter(os.Stderr)),
+ // and NewDefaultFormatter returns NewJournaldFormatter when syscall.Getppid() == 1.
+ // Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.
+ output := cfg.LogOutputs[0]
+ switch output {
+ case StdErrLogOutput:
+ capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, cfg.Debug))
+ case StdOutLogOutput:
+ capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, cfg.Debug))
+ case DefaultLogOutput:
+ default:
+ return fmt.Errorf("unknown log-output %q (only supports %q, %q, %q)", output, DefaultLogOutput, StdErrLogOutput, StdOutLogOutput)
+ }
+
+ case "zap":
+ if len(cfg.LogOutputs) == 0 {
+ cfg.LogOutputs = []string{DefaultLogOutput}
+ }
+ if len(cfg.LogOutputs) > 1 {
+ for _, v := range cfg.LogOutputs {
+ if v == DefaultLogOutput {
+ return fmt.Errorf("multi logoutput for %q is not supported yet", DefaultLogOutput)
+ }
+ }
+ }
+
+ outputPaths, errOutputPaths := make([]string, 0), make([]string, 0)
+ isJournal := false
+ for _, v := range cfg.LogOutputs {
+ switch v {
+ case DefaultLogOutput:
+ outputPaths = append(outputPaths, StdErrLogOutput)
+ errOutputPaths = append(errOutputPaths, StdErrLogOutput)
+
+ case JournalLogOutput:
+ isJournal = true
+
+ case StdErrLogOutput:
+ outputPaths = append(outputPaths, StdErrLogOutput)
+ errOutputPaths = append(errOutputPaths, StdErrLogOutput)
+
+ case StdOutLogOutput:
+ outputPaths = append(outputPaths, StdOutLogOutput)
+ errOutputPaths = append(errOutputPaths, StdOutLogOutput)
+
+ default:
+ outputPaths = append(outputPaths, v)
+ errOutputPaths = append(errOutputPaths, v)
+ }
+ }
+
+ if !isJournal {
+ copied := logutil.DefaultZapLoggerConfig
+ copied.OutputPaths = outputPaths
+ copied.ErrorOutputPaths = errOutputPaths
+ copied = logutil.MergeOutputPaths(copied)
+ copied.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))
+ if cfg.Debug || cfg.LogLevel == "debug" {
+ // enable tracing even when "--debug --log-level info"
+ // in order to keep backward compatibility with <= v3.3
+ // TODO: remove "Debug" check in v3.5
+ grpc.EnableTracing = true
+ }
+ if cfg.ZapLoggerBuilder == nil {
+ cfg.ZapLoggerBuilder = func(c *Config) error {
+ var err error
+ c.logger, err = copied.Build()
+ if err != nil {
+ return err
+ }
+ c.loggerMu.Lock()
+ defer c.loggerMu.Unlock()
+ c.loggerConfig = &copied
+ c.loggerCore = nil
+ c.loggerWriteSyncer = nil
+ grpcLogOnce.Do(func() {
+ // debug true, enable info, warning, error
+ // debug false, only discard info
+ var gl grpclog.LoggerV2
+ gl, err = logutil.NewGRPCLoggerV2(copied)
+ if err == nil {
+ grpclog.SetLoggerV2(gl)
+ }
+ })
+ return nil
+ }
+ }
+ } else {
+ if len(cfg.LogOutputs) > 1 {
+ for _, v := range cfg.LogOutputs {
+ if v != DefaultLogOutput {
+ return fmt.Errorf("running with systemd/journal but other '--log-outputs' values (%q) are configured with 'default'; override 'default' value with something else", cfg.LogOutputs)
+ }
+ }
+ }
+
+ // use stderr as fallback
+ syncer, lerr := getJournalWriteSyncer()
+ if lerr != nil {
+ return lerr
+ }
+
+ lvl := zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))
+ if cfg.Debug || cfg.LogLevel == "debug" {
+ // enable tracing even when "--debug --log-level info"
+ // in order to keep backward compatibility with <= v3.3
+ // TODO: remove "Debug" check in v3.5
+ grpc.EnableTracing = true
+ }
+
+ // WARN: do not change field names in encoder config
+ // journald logging writer assumes field names of "level" and "caller"
+ cr := zapcore.NewCore(
+ zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig),
+ syncer,
+ lvl,
+ )
+ if cfg.ZapLoggerBuilder == nil {
+ cfg.ZapLoggerBuilder = func(c *Config) error {
+ c.logger = zap.New(cr, zap.AddCaller(), zap.ErrorOutput(syncer))
+ c.loggerMu.Lock()
+ defer c.loggerMu.Unlock()
+ c.loggerConfig = nil
+ c.loggerCore = cr
+ c.loggerWriteSyncer = syncer
+
+ grpcLogOnce.Do(func() {
+ grpclog.SetLoggerV2(logutil.NewGRPCLoggerV2FromZapCore(cr, syncer))
+ })
+ return nil
+ }
+ }
+ }
+
+ err := cfg.ZapLoggerBuilder(cfg)
+ if err != nil {
+ return err
+ }
+
+ logTLSHandshakeFailure := func(conn *tls.Conn, err error) {
+ state := conn.ConnectionState()
+ remoteAddr := conn.RemoteAddr().String()
+ serverName := state.ServerName
+ if len(state.PeerCertificates) > 0 {
+ cert := state.PeerCertificates[0]
+ ips := make([]string, len(cert.IPAddresses))
+ for i := range cert.IPAddresses {
+ ips[i] = cert.IPAddresses[i].String()
+ }
+ cfg.logger.Warn(
+ "rejected connection",
+ zap.String("remote-addr", remoteAddr),
+ zap.String("server-name", serverName),
+ zap.Strings("ip-addresses", ips),
+ zap.Strings("dns-names", cert.DNSNames),
+ zap.Error(err),
+ )
+ } else {
+ cfg.logger.Warn(
+ "rejected connection",
+ zap.String("remote-addr", remoteAddr),
+ zap.String("server-name", serverName),
+ zap.Error(err),
+ )
+ }
+ }
+ cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure
+ cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure
+
+ default:
+ return fmt.Errorf("unknown logger option %q", cfg.Logger)
+ }
+
+ return nil
+}
+
+// NewZapCoreLoggerBuilder generates a zap core logger builder.
+func NewZapCoreLoggerBuilder(lg *zap.Logger, cr zapcore.Core, syncer zapcore.WriteSyncer) func(*Config) error {
+ return func(cfg *Config) error {
+ cfg.loggerMu.Lock()
+ defer cfg.loggerMu.Unlock()
+ cfg.logger = lg
+ cfg.loggerConfig = nil
+ cfg.loggerCore = cr
+ cfg.loggerWriteSyncer = syncer
+
+ grpcLogOnce.Do(func() {
+ grpclog.SetLoggerV2(logutil.NewGRPCLoggerV2FromZapCore(cr, syncer))
+ })
+ return nil
+ }
+}
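+
+// A minimal usage sketch (assumed caller code, not part of this package):
+//
+// syncer := zapcore.AddSync(os.Stderr)
+// core := zapcore.NewCore(zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), syncer, zap.InfoLevel)
+// lg := zap.New(core)
+// cfg := embed.NewConfig()
+// cfg.ZapLoggerBuilder = embed.NewZapCoreLoggerBuilder(lg, core, syncer)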
diff --git a/vendor/go.etcd.io/etcd/embed/config_logging_journal_unix.go b/vendor/go.etcd.io/etcd/embed/config_logging_journal_unix.go
new file mode 100644
index 000000000000..44a51d677089
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/embed/config_logging_journal_unix.go
@@ -0,0 +1,35 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package embed
+
+import (
+ "fmt"
+ "os"
+
+ "go.etcd.io/etcd/pkg/logutil"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// use stderr as fallback
+func getJournalWriteSyncer() (zapcore.WriteSyncer, error) {
+ jw, err := logutil.NewJournalWriter(os.Stderr)
+ if err != nil {
+ return nil, fmt.Errorf("can't find journal (%v)", err)
+ }
+ return zapcore.AddSync(jw), nil
+}
diff --git a/vendor/go.etcd.io/etcd/embed/config_logging_journal_windows.go b/vendor/go.etcd.io/etcd/embed/config_logging_journal_windows.go
new file mode 100644
index 000000000000..5b762564848f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/embed/config_logging_journal_windows.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package embed
+
+import (
+ "os"
+
+ "go.uber.org/zap/zapcore"
+)
+
+func getJournalWriteSyncer() (zapcore.WriteSyncer, error) {
+ return zapcore.AddSync(os.Stderr), nil
+}
diff --git a/vendor/go.etcd.io/etcd/embed/doc.go b/vendor/go.etcd.io/etcd/embed/doc.go
new file mode 100644
index 000000000000..4811bb63430a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/embed/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package embed provides bindings for embedding an etcd server in a program.
+
+Launch an embedded etcd server using the configuration defaults:
+
+ import (
+ "log"
+ "time"
+
+ "go.etcd.io/etcd/embed"
+ )
+
+ func main() {
+ cfg := embed.NewConfig()
+ cfg.Dir = "default.etcd"
+ e, err := embed.StartEtcd(cfg)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer e.Close()
+ select {
+ case <-e.Server.ReadyNotify():
+ log.Printf("Server is ready!")
+ case <-time.After(60 * time.Second):
+ e.Server.Stop() // trigger a shutdown
+ log.Printf("Server took too long to start!")
+ }
+ log.Fatal(<-e.Err())
+ }
+*/
+package embed
diff --git a/vendor/go.etcd.io/etcd/embed/etcd.go b/vendor/go.etcd.io/etcd/embed/etcd.go
new file mode 100644
index 000000000000..ac7dbc987fb7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/embed/etcd.go
@@ -0,0 +1,829 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "io/ioutil"
+ defaultLog "log"
+ "net"
+ "net/http"
+ "net/url"
+ "runtime"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api/etcdhttp"
+ "go.etcd.io/etcd/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/etcdserver/api/v2http"
+ "go.etcd.io/etcd/etcdserver/api/v2v3"
+ "go.etcd.io/etcd/etcdserver/api/v3client"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc"
+ "go.etcd.io/etcd/pkg/debugutil"
+ runtimeutil "go.etcd.io/etcd/pkg/runtime"
+ "go.etcd.io/etcd/pkg/transport"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/version"
+
+ "github.com/coreos/pkg/capnslog"
+ grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+ "github.com/soheilhy/cmux"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/keepalive"
+)
+
+var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "embed")
+
+const (
+ // Internal fd usage includes disk usage and transport usage.
+ // To read/write a snapshot, the snap pkg needs 1. In the normal case, the
+ // wal pkg needs at most 2 to read/lock/write WALs; one case where it needs
+ // 2 is reading all logs after some snapshot index, which spans the end of
+ // the second-to-last WAL and the head of the last. Purging needs 1 to read
+ // the directory, and the fd monitor needs 1.
+ // For transport, rafthttp builds two long-polling connections and at most
+ // four temporary connections with each member. With at most 9 members in a
+ // cluster, it should reserve 96.
+ // For safety, we set the total reserved number to 150.
+ reservedInternalFDNum = 150
+)
+
+// Etcd contains a running etcd server and its listeners.
+type Etcd struct {
+ Peers []*peerListener
+ Clients []net.Listener
+ // a map of contexts for the servers that serves client requests.
+ sctxs map[string]*serveCtx
+ metricsListeners []net.Listener
+
+ Server *etcdserver.EtcdServer
+
+ cfg Config
+ stopc chan struct{}
+ errc chan error
+
+ closeOnce sync.Once
+}
+
+type peerListener struct {
+ net.Listener
+ serve func() error
+ close func(context.Context) error
+}
+
+// StartEtcd launches the etcd server and HTTP handlers for client/server communication.
+// The returned Etcd.Server is not guaranteed to have joined the cluster. Wait
+// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use.
+func StartEtcd(inCfg *Config) (e *Etcd, err error) {
+ if err = inCfg.Validate(); err != nil {
+ return nil, err
+ }
+ serving := false
+ e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})}
+ cfg := &e.cfg
+ defer func() {
+ if e == nil || err == nil {
+ return
+ }
+ if !serving {
+ // errored before starting gRPC server for serveCtx.serversC
+ for _, sctx := range e.sctxs {
+ close(sctx.serversC)
+ }
+ }
+ e.Close()
+ e = nil
+ }()
+
+ if e.cfg.logger != nil {
+ e.cfg.logger.Info(
+ "configuring peer listeners",
+ zap.Strings("listen-peer-urls", e.cfg.getLPURLs()),
+ )
+ }
+ if e.Peers, err = configurePeerListeners(cfg); err != nil {
+ return e, err
+ }
+
+ if e.cfg.logger != nil {
+ e.cfg.logger.Info(
+ "configuring client listeners",
+ zap.Strings("listen-client-urls", e.cfg.getLCURLs()),
+ )
+ }
+ if e.sctxs, err = configureClientListeners(cfg); err != nil {
+ return e, err
+ }
+
+ for _, sctx := range e.sctxs {
+ e.Clients = append(e.Clients, sctx.l)
+ }
+
+ var (
+ urlsmap types.URLsMap
+ token string
+ )
+ memberInitialized := true
+ if !isMemberInitialized(cfg) {
+ memberInitialized = false
+ urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd")
+ if err != nil {
+ return e, fmt.Errorf("error setting up initial cluster: %v", err)
+ }
+ }
+
+ // AutoCompactionRetention defaults to "0" if not set.
+ if len(cfg.AutoCompactionRetention) == 0 {
+ cfg.AutoCompactionRetention = "0"
+ }
+ autoCompactionRetention, err := parseCompactionRetention(cfg.AutoCompactionMode, cfg.AutoCompactionRetention)
+ if err != nil {
+ return e, err
+ }
+
+ backendFreelistType := parseBackendFreelistType(cfg.ExperimentalBackendFreelistType)
+
+ srvcfg := etcdserver.ServerConfig{
+ Name: cfg.Name,
+ ClientURLs: cfg.ACUrls,
+ PeerURLs: cfg.APUrls,
+ DataDir: cfg.Dir,
+ DedicatedWALDir: cfg.WalDir,
+ SnapshotCount: cfg.SnapshotCount,
+ SnapshotCatchUpEntries: cfg.SnapshotCatchUpEntries,
+ MaxSnapFiles: cfg.MaxSnapFiles,
+ MaxWALFiles: cfg.MaxWalFiles,
+ InitialPeerURLsMap: urlsmap,
+ InitialClusterToken: token,
+ DiscoveryURL: cfg.Durl,
+ DiscoveryProxy: cfg.Dproxy,
+ NewCluster: cfg.IsNewCluster(),
+ PeerTLSInfo: cfg.PeerTLSInfo,
+ TickMs: cfg.TickMs,
+ ElectionTicks: cfg.ElectionTicks(),
+ InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
+ AutoCompactionRetention: autoCompactionRetention,
+ AutoCompactionMode: cfg.AutoCompactionMode,
+ QuotaBackendBytes: cfg.QuotaBackendBytes,
+ BackendBatchLimit: cfg.BackendBatchLimit,
+ BackendFreelistType: backendFreelistType,
+ BackendBatchInterval: cfg.BackendBatchInterval,
+ MaxTxnOps: cfg.MaxTxnOps,
+ MaxRequestBytes: cfg.MaxRequestBytes,
+ StrictReconfigCheck: cfg.StrictReconfigCheck,
+ ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth,
+ AuthToken: cfg.AuthToken,
+ BcryptCost: cfg.BcryptCost,
+ CORS: cfg.CORS,
+ HostWhitelist: cfg.HostWhitelist,
+ InitialCorruptCheck: cfg.ExperimentalInitialCorruptCheck,
+ CorruptCheckTime: cfg.ExperimentalCorruptCheckTime,
+ PreVote: cfg.PreVote,
+ Logger: cfg.logger,
+ LoggerConfig: cfg.loggerConfig,
+ LoggerCore: cfg.loggerCore,
+ LoggerWriteSyncer: cfg.loggerWriteSyncer,
+ Debug: cfg.Debug,
+ ForceNewCluster: cfg.ForceNewCluster,
+ EnableGRPCGateway: cfg.EnableGRPCGateway,
+ EnableLeaseCheckpoint: cfg.ExperimentalEnableLeaseCheckpoint,
+ CompactionBatchLimit: cfg.ExperimentalCompactionBatchLimit,
+ }
+ print(e.cfg.logger, *cfg, srvcfg, memberInitialized)
+ if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
+ return e, err
+ }
+
+ // buffer channel so goroutines on closed connections won't wait forever
+ e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs))
+
+ // newly started member ("memberInitialized==false")
+ // does not need corruption check
+ if memberInitialized {
+ if err = e.Server.CheckInitialHashKV(); err != nil {
+ // set "EtcdServer" to nil, so that it does not block on "EtcdServer.Close()"
+ // (nothing to close since rafthttp transports have not been started)
+ e.Server = nil
+ return e, err
+ }
+ }
+ e.Server.Start()
+
+ if err = e.servePeers(); err != nil {
+ return e, err
+ }
+ if err = e.serveClients(); err != nil {
+ return e, err
+ }
+ if err = e.serveMetrics(); err != nil {
+ return e, err
+ }
+
+ if e.cfg.logger != nil {
+ e.cfg.logger.Info(
+ "now serving peer/client/metrics",
+ zap.String("local-member-id", e.Server.ID().String()),
+ zap.Strings("initial-advertise-peer-urls", e.cfg.getAPURLs()),
+ zap.Strings("listen-peer-urls", e.cfg.getLPURLs()),
+ zap.Strings("advertise-client-urls", e.cfg.getACURLs()),
+ zap.Strings("listen-client-urls", e.cfg.getLCURLs()),
+ zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()),
+ )
+ }
+ serving = true
+ return e, nil
+}
+
+func print(lg *zap.Logger, ec Config, sc etcdserver.ServerConfig, memberInitialized bool) {
+ // TODO: remove this after dropping "capnslog"
+ if lg == nil {
+ plog.Infof("name = %s", ec.Name)
+ if sc.ForceNewCluster {
+ plog.Infof("force new cluster")
+ }
+ plog.Infof("data dir = %s", sc.DataDir)
+ plog.Infof("member dir = %s", sc.MemberDir())
+ if sc.DedicatedWALDir != "" {
+ plog.Infof("dedicated WAL dir = %s", sc.DedicatedWALDir)
+ }
+ plog.Infof("heartbeat = %dms", sc.TickMs)
+ plog.Infof("election = %dms", sc.ElectionTicks*int(sc.TickMs))
+ plog.Infof("snapshot count = %d", sc.SnapshotCount)
+ if len(sc.DiscoveryURL) != 0 {
+ plog.Infof("discovery URL= %s", sc.DiscoveryURL)
+ if len(sc.DiscoveryProxy) != 0 {
+ plog.Infof("discovery proxy = %s", sc.DiscoveryProxy)
+ }
+ }
+ plog.Infof("advertise client URLs = %s", sc.ClientURLs)
+ if memberInitialized {
+ plog.Infof("initial advertise peer URLs = %s", sc.PeerURLs)
+ plog.Infof("initial cluster = %s", sc.InitialPeerURLsMap)
+ }
+ } else {
+ cors := make([]string, 0, len(ec.CORS))
+ for v := range ec.CORS {
+ cors = append(cors, v)
+ }
+ sort.Strings(cors)
+
+ hss := make([]string, 0, len(ec.HostWhitelist))
+ for v := range ec.HostWhitelist {
+ hss = append(hss, v)
+ }
+ sort.Strings(hss)
+
+ quota := ec.QuotaBackendBytes
+ if quota == 0 {
+ quota = etcdserver.DefaultQuotaBytes
+ }
+
+ lg.Info(
+ "starting an etcd server",
+ zap.String("etcd-version", version.Version),
+ zap.String("git-sha", version.GitSHA),
+ zap.String("go-version", runtime.Version()),
+ zap.String("go-os", runtime.GOOS),
+ zap.String("go-arch", runtime.GOARCH),
+ zap.Int("max-cpu-set", runtime.GOMAXPROCS(0)),
+ zap.Int("max-cpu-available", runtime.NumCPU()),
+ zap.Bool("member-initialized", memberInitialized),
+ zap.String("name", sc.Name),
+ zap.String("data-dir", sc.DataDir),
+ zap.String("wal-dir", ec.WalDir),
+ zap.String("wal-dir-dedicated", sc.DedicatedWALDir),
+ zap.String("member-dir", sc.MemberDir()),
+ zap.Bool("force-new-cluster", sc.ForceNewCluster),
+ zap.String("heartbeat-interval", fmt.Sprintf("%v", time.Duration(sc.TickMs)*time.Millisecond)),
+ zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)),
+ zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance),
+ zap.Uint64("snapshot-count", sc.SnapshotCount),
+ zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries),
+ zap.Strings("initial-advertise-peer-urls", ec.getAPURLs()),
+ zap.Strings("listen-peer-urls", ec.getLPURLs()),
+ zap.Strings("advertise-client-urls", ec.getACURLs()),
+ zap.Strings("listen-client-urls", ec.getLCURLs()),
+ zap.Strings("listen-metrics-urls", ec.getMetricsURLs()),
+ zap.Strings("cors", cors),
+ zap.Strings("host-whitelist", hss),
+ zap.String("initial-cluster", sc.InitialPeerURLsMap.String()),
+ zap.String("initial-cluster-state", ec.ClusterState),
+ zap.String("initial-cluster-token", sc.InitialClusterToken),
+ zap.Int64("quota-size-bytes", quota),
+ zap.Bool("pre-vote", sc.PreVote),
+ zap.Bool("initial-corrupt-check", sc.InitialCorruptCheck),
+ zap.String("corrupt-check-time-interval", sc.CorruptCheckTime.String()),
+ zap.String("auto-compaction-mode", sc.AutoCompactionMode),
+ zap.Duration("auto-compaction-retention", sc.AutoCompactionRetention),
+ zap.String("auto-compaction-interval", sc.AutoCompactionRetention.String()),
+ zap.String("discovery-url", sc.DiscoveryURL),
+ zap.String("discovery-proxy", sc.DiscoveryProxy),
+ )
+ }
+}
+
+// Config returns the current configuration.
+func (e *Etcd) Config() Config {
+ return e.cfg
+}
+
+// Close gracefully shuts down all servers/listeners.
+// Client requests will be terminated with request timeout.
+// After the timeout, remaining requests are forcibly closed.
+func (e *Etcd) Close() {
+ fields := []zap.Field{
+ zap.String("name", e.cfg.Name),
+ zap.String("data-dir", e.cfg.Dir),
+ zap.Strings("advertise-peer-urls", e.cfg.getAPURLs()),
+ zap.Strings("advertise-client-urls", e.cfg.getACURLs()),
+ }
+ lg := e.GetLogger()
+ if lg != nil {
+ lg.Info("closing etcd server", fields...)
+ }
+ defer func() {
+ if lg != nil {
+ lg.Info("closed etcd server", fields...)
+ lg.Sync()
+ }
+ }()
+
+ e.closeOnce.Do(func() { close(e.stopc) })
+
+ // close client requests with request timeout
+ timeout := 2 * time.Second
+ if e.Server != nil {
+ timeout = e.Server.Cfg.ReqTimeout()
+ }
+ for _, sctx := range e.sctxs {
+ for ss := range sctx.serversC {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ stopServers(ctx, ss)
+ cancel()
+ }
+ }
+
+ for _, sctx := range e.sctxs {
+ sctx.cancel()
+ }
+
+ for i := range e.Clients {
+ if e.Clients[i] != nil {
+ e.Clients[i].Close()
+ }
+ }
+
+ for i := range e.metricsListeners {
+ e.metricsListeners[i].Close()
+ }
+
+ // close rafthttp transports
+ if e.Server != nil {
+ e.Server.Stop()
+ }
+
+ // close all idle connections in peer handler (wait up to 1 second)
+ for i := range e.Peers {
+ if e.Peers[i] != nil && e.Peers[i].close != nil {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ e.Peers[i].close(ctx)
+ cancel()
+ }
+ }
+}
+
+func stopServers(ctx context.Context, ss *servers) {
+ shutdownNow := func() {
+ // first, close the http.Server
+ ss.http.Shutdown(ctx)
+ // then close grpc.Server; cancels all active RPCs
+ ss.grpc.Stop()
+ }
+
+ // do not grpc.Server.GracefulStop with TLS enabled etcd server
+ // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
+ // and https://github.com/etcd-io/etcd/issues/8916
+ if ss.secure {
+ shutdownNow()
+ return
+ }
+
+ ch := make(chan struct{})
+ go func() {
+ defer close(ch)
+ // close listeners to stop accepting new connections,
+ // will block on any existing transports
+ ss.grpc.GracefulStop()
+ }()
+
+ // wait until all pending RPCs are finished
+ select {
+ case <-ch:
+ case <-ctx.Done():
+ // took too long, manually close open transports
+ // e.g. watch streams
+ shutdownNow()
+
+ // concurrent GracefulStop should be interrupted
+ <-ch
+ }
+}
+
+func (e *Etcd) Err() <-chan error { return e.errc }
+
+func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
+ if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil {
+ return nil, err
+ }
+ if err = cfg.PeerSelfCert(); err != nil {
+ if cfg.logger != nil {
+ cfg.logger.Fatal("failed to get peer self-signed certs", zap.Error(err))
+ } else {
+ plog.Fatalf("could not get certs (%v)", err)
+ }
+ }
+ if !cfg.PeerTLSInfo.Empty() {
+ if cfg.logger != nil {
+ cfg.logger.Info(
+ "starting with peer TLS",
+ zap.String("tls-info", fmt.Sprintf("%+v", cfg.PeerTLSInfo)),
+ zap.Strings("cipher-suites", cfg.CipherSuites),
+ )
+ } else {
+ plog.Infof("peerTLS: %s", cfg.PeerTLSInfo)
+ }
+ }
+
+ peers = make([]*peerListener, len(cfg.LPUrls))
+ defer func() {
+ if err == nil {
+ return
+ }
+ for i := range peers {
+ if peers[i] != nil && peers[i].close != nil {
+ if cfg.logger != nil {
+ cfg.logger.Warn(
+ "closing peer listener",
+ zap.String("address", cfg.LPUrls[i].String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String())
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ peers[i].close(ctx)
+ cancel()
+ }
+ }
+ }()
+
+ for i, u := range cfg.LPUrls {
+ if u.Scheme == "http" {
+ if !cfg.PeerTLSInfo.Empty() {
+ if cfg.logger != nil {
+ cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("peer-url", u.String()))
+ } else {
+ plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
+ }
+ }
+ if cfg.PeerTLSInfo.ClientCertAuth {
+ if cfg.logger != nil {
+ cfg.logger.Warn("scheme is HTTP while --peer-client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("peer-url", u.String()))
+ } else {
+ plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
+ }
+ }
+ }
+ peers[i] = &peerListener{close: func(context.Context) error { return nil }}
+ peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo)
+ if err != nil {
+ return nil, err
+ }
+ // once serving starts, this is overwritten to shut down the http.Server
+ peers[i].close = func(context.Context) error {
+ return peers[i].Listener.Close()
+ }
+ }
+ return peers, nil
+}
+
+// configure peer handlers after rafthttp.Transport started
+func (e *Etcd) servePeers() (err error) {
+ ph := etcdhttp.NewPeerHandler(e.GetLogger(), e.Server)
+ var peerTLScfg *tls.Config
+ if !e.cfg.PeerTLSInfo.Empty() {
+ if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil {
+ return err
+ }
+ }
+
+ for _, p := range e.Peers {
+ u := p.Listener.Addr().String()
+ gs := v3rpc.Server(e.Server, peerTLScfg)
+ m := cmux.New(p.Listener)
+ go gs.Serve(m.Match(cmux.HTTP2()))
+ srv := &http.Server{
+ Handler: grpcHandlerFunc(gs, ph),
+ ReadTimeout: 5 * time.Minute,
+ ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error
+ }
+ go srv.Serve(m.Match(cmux.Any()))
+ p.serve = func() error { return m.Serve() }
+ p.close = func(ctx context.Context) error {
+ // gracefully shutdown http.Server
+ // close open listeners, idle connections
+ // until context cancel or time-out
+ if e.cfg.logger != nil {
+ e.cfg.logger.Info(
+ "stopping serving peer traffic",
+ zap.String("address", u),
+ )
+ }
+ stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv})
+ if e.cfg.logger != nil {
+ e.cfg.logger.Info(
+ "stopped serving peer traffic",
+ zap.String("address", u),
+ )
+ }
+ return nil
+ }
+ }
+
+ // start a peer server goroutine for each listener
+ for _, pl := range e.Peers {
+ go func(l *peerListener) {
+ u := l.Addr().String()
+ if e.cfg.logger != nil {
+ e.cfg.logger.Info(
+ "serving peer traffic",
+ zap.String("address", u),
+ )
+ } else {
+ plog.Info("listening for peers on ", u)
+ }
+ e.errHandler(l.serve())
+ }(pl)
+ }
+ return nil
+}
+
+func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
+ if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil {
+ return nil, err
+ }
+ if err = cfg.ClientSelfCert(); err != nil {
+ if cfg.logger != nil {
+ cfg.logger.Fatal("failed to get client self-signed certs", zap.Error(err))
+ } else {
+ plog.Fatalf("could not get certs (%v)", err)
+ }
+ }
+ if cfg.EnablePprof {
+ if cfg.logger != nil {
+ cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf))
+ } else {
+ plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
+ }
+ }
+
+ sctxs = make(map[string]*serveCtx)
+ for _, u := range cfg.LCUrls {
+ sctx := newServeCtx(cfg.logger)
+ if u.Scheme == "http" || u.Scheme == "unix" {
+ if !cfg.ClientTLSInfo.Empty() {
+ if cfg.logger != nil {
+ cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("client-url", u.String()))
+ } else {
+ plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String())
+ }
+ }
+ if cfg.ClientTLSInfo.ClientCertAuth {
+ if cfg.logger != nil {
+ cfg.logger.Warn("scheme is HTTP while --client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("client-url", u.String()))
+ } else {
+ plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
+ }
+ }
+ }
+ if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
+ return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String())
+ }
+
+ network := "tcp"
+ addr := u.Host
+ if u.Scheme == "unix" || u.Scheme == "unixs" {
+ network = "unix"
+ addr = u.Host + u.Path
+ }
+ sctx.network = network
+
+ sctx.secure = u.Scheme == "https" || u.Scheme == "unixs"
+ sctx.insecure = !sctx.secure
+ if oldctx := sctxs[addr]; oldctx != nil {
+ oldctx.secure = oldctx.secure || sctx.secure
+ oldctx.insecure = oldctx.insecure || sctx.insecure
+ continue
+ }
+
+ if sctx.l, err = net.Listen(network, addr); err != nil {
+ return nil, err
+ }
+ // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
+ // hosts that disable ipv6. So, use the address given by the user.
+ sctx.addr = addr
+
+ if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
+ if fdLimit <= reservedInternalFDNum {
+ if cfg.logger != nil {
+ cfg.logger.Fatal(
+ "file descriptor limit of etcd process is too low; please set higher",
+ zap.Uint64("limit", fdLimit),
+ zap.Int("recommended-limit", reservedInternalFDNum),
+ )
+ } else {
+ plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
+ }
+ }
+ sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
+ }
+
+ if network == "tcp" {
+ if sctx.l, err = transport.NewKeepAliveListener(sctx.l, network, nil); err != nil {
+ return nil, err
+ }
+ }
+
+ defer func() {
+ if err == nil {
+ return
+ }
+ sctx.l.Close()
+ if cfg.logger != nil {
+ cfg.logger.Warn(
+ "closing peer listener",
+ zap.String("address", u.Host),
+ zap.Error(err),
+ )
+ } else {
+ plog.Info("stopping listening for client requests on ", u.Host)
+ }
+ }()
+ for k := range cfg.UserHandlers {
+ sctx.userHandlers[k] = cfg.UserHandlers[k]
+ }
+ sctx.serviceRegister = cfg.ServiceRegister
+ if cfg.EnablePprof || cfg.Debug {
+ sctx.registerPprof()
+ }
+ if cfg.Debug {
+ sctx.registerTrace()
+ }
+ sctxs[addr] = sctx
+ }
+ return sctxs, nil
+}
+
+func (e *Etcd) serveClients() (err error) {
+ if !e.cfg.ClientTLSInfo.Empty() {
+ if e.cfg.logger != nil {
+ e.cfg.logger.Info(
+ "starting with client TLS",
+ zap.String("tls-info", fmt.Sprintf("%+v", e.cfg.ClientTLSInfo)),
+ zap.Strings("cipher-suites", e.cfg.CipherSuites),
+ )
+ } else {
+ plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo)
+ }
+ }
+
+ // Start a client server goroutine for each listen address
+ var h http.Handler
+ if e.Config().EnableV2 {
+ if len(e.Config().ExperimentalEnableV2V3) > 0 {
+ srv := v2v3.NewServer(e.cfg.logger, v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3)
+ h = v2http.NewClientHandler(e.GetLogger(), srv, e.Server.Cfg.ReqTimeout())
+ } else {
+ h = v2http.NewClientHandler(e.GetLogger(), e.Server, e.Server.Cfg.ReqTimeout())
+ }
+ } else {
+ mux := http.NewServeMux()
+ etcdhttp.HandleBasic(mux, e.Server)
+ h = mux
+ }
+
+ gopts := []grpc.ServerOption{}
+ if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
+ gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+ MinTime: e.cfg.GRPCKeepAliveMinTime,
+ PermitWithoutStream: false,
+ }))
+ }
+ if e.cfg.GRPCKeepAliveInterval > time.Duration(0) &&
+ e.cfg.GRPCKeepAliveTimeout > time.Duration(0) {
+ gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{
+ Time: e.cfg.GRPCKeepAliveInterval,
+ Timeout: e.cfg.GRPCKeepAliveTimeout,
+ }))
+ }
+
+ // start a client server goroutine for each serve context
+ for _, sctx := range e.sctxs {
+ go func(s *serveCtx) {
+ e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...))
+ }(sctx)
+ }
+ return nil
+}
+
+func (e *Etcd) serveMetrics() (err error) {
+ if e.cfg.Metrics == "extensive" {
+ grpc_prometheus.EnableHandlingTimeHistogram()
+ }
+
+ if len(e.cfg.ListenMetricsUrls) > 0 {
+ metricsMux := http.NewServeMux()
+ etcdhttp.HandleMetricsHealth(metricsMux, e.Server)
+
+ for _, murl := range e.cfg.ListenMetricsUrls {
+ tlsInfo := &e.cfg.ClientTLSInfo
+ if murl.Scheme == "http" {
+ tlsInfo = nil
+ }
+ ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsInfo)
+ if err != nil {
+ return err
+ }
+ e.metricsListeners = append(e.metricsListeners, ml)
+ go func(u url.URL, ln net.Listener) {
+ if e.cfg.logger != nil {
+ e.cfg.logger.Info(
+ "serving metrics",
+ zap.String("address", u.String()),
+ )
+ } else {
+ plog.Info("listening for metrics on ", u.String())
+ }
+ e.errHandler(http.Serve(ln, metricsMux))
+ }(murl, ml)
+ }
+ }
+ return nil
+}
+
+func (e *Etcd) errHandler(err error) {
+ select {
+ case <-e.stopc:
+ return
+ default:
+ }
+ select {
+ case <-e.stopc:
+ case e.errc <- err:
+ }
+}
+
+// GetLogger returns the logger.
+func (e *Etcd) GetLogger() *zap.Logger {
+ e.cfg.loggerMu.RLock()
+ l := e.cfg.logger
+ e.cfg.loggerMu.RUnlock()
+ return l
+}
+
+func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) {
+ h, err := strconv.Atoi(retention)
+ if err == nil {
+ switch mode {
+ case CompactorModeRevision:
+ ret = time.Duration(int64(h))
+ case CompactorModePeriodic:
+ ret = time.Duration(int64(h)) * time.Hour
+ }
+ } else {
+ // periodic compaction
+ ret, err = time.ParseDuration(retention)
+ if err != nil {
+ return 0, fmt.Errorf("error parsing CompactionRetention: %v", err)
+ }
+ }
+ return ret, nil
+}
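+
+// For example (illustrative): mode "periodic" with retention "1" yields 1h,
+// retention "30m" yields 30m, and mode "revision" with retention "1000"
+// yields time.Duration(1000), i.e. a revision count carried in a Duration.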
diff --git a/vendor/go.etcd.io/etcd/embed/serve.go b/vendor/go.etcd.io/etcd/embed/serve.go
new file mode 100644
index 000000000000..a3b20c46c38f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/embed/serve.go
@@ -0,0 +1,435 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ defaultLog "log"
+ "net"
+ "net/http"
+ "strings"
+
+ "go.etcd.io/etcd/clientv3/credentials"
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api/v3client"
+ "go.etcd.io/etcd/etcdserver/api/v3election"
+ "go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb"
+ v3electiongw "go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/gw"
+ "go.etcd.io/etcd/etcdserver/api/v3lock"
+ "go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb"
+ v3lockgw "go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/gw"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc"
+ etcdservergw "go.etcd.io/etcd/etcdserver/etcdserverpb/gw"
+ "go.etcd.io/etcd/pkg/debugutil"
+ "go.etcd.io/etcd/pkg/httputil"
+ "go.etcd.io/etcd/pkg/transport"
+
+ gw "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/soheilhy/cmux"
+ "github.com/tmc/grpc-websocket-proxy/wsproxy"
+ "go.uber.org/zap"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc"
+)
+
+type serveCtx struct {
+ lg *zap.Logger
+ l net.Listener
+ addr string
+ network string
+ secure bool
+ insecure bool
+
+ ctx context.Context
+ cancel context.CancelFunc
+
+ userHandlers map[string]http.Handler
+ serviceRegister func(*grpc.Server)
+ serversC chan *servers
+}
+
+type servers struct {
+ secure bool
+ grpc *grpc.Server
+ http *http.Server
+}
+
+func newServeCtx(lg *zap.Logger) *serveCtx {
+ ctx, cancel := context.WithCancel(context.Background())
+ return &serveCtx{
+ lg: lg,
+ ctx: ctx,
+ cancel: cancel,
+ userHandlers: make(map[string]http.Handler),
+ serversC: make(chan *servers, 2), // buffered for the case where both sctx.insecure and sctx.secure are true
+ }
+}
+
+// serve accepts incoming connections on the listener l,
+// creating a new service goroutine for each. The service goroutines
+// read requests and then call handler to reply to them.
+func (sctx *serveCtx) serve(
+ s *etcdserver.EtcdServer,
+ tlsinfo *transport.TLSInfo,
+ handler http.Handler,
+ errHandler func(error),
+ gopts ...grpc.ServerOption) (err error) {
+ logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)
+ <-s.ReadyNotify()
+
+ if sctx.lg == nil {
+ plog.Info("ready to serve client requests")
+ }
+
+ m := cmux.New(sctx.l)
+ v3c := v3client.New(s)
+ servElection := v3election.NewElectionServer(v3c)
+ servLock := v3lock.NewLockServer(v3c)
+
+ var gs *grpc.Server
+ defer func() {
+ if err != nil && gs != nil {
+ gs.Stop()
+ }
+ }()
+
+ if sctx.insecure {
+ gs = v3rpc.Server(s, nil, gopts...)
+ v3electionpb.RegisterElectionServer(gs, servElection)
+ v3lockpb.RegisterLockServer(gs, servLock)
+ if sctx.serviceRegister != nil {
+ sctx.serviceRegister(gs)
+ }
+ grpcl := m.Match(cmux.HTTP2())
+ go func() { errHandler(gs.Serve(grpcl)) }()
+
+ var gwmux *gw.ServeMux
+ if s.Cfg.EnableGRPCGateway {
+ gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()})
+ if err != nil {
+ return err
+ }
+ }
+
+ httpmux := sctx.createMux(gwmux, handler)
+
+ srvhttp := &http.Server{
+ Handler: createAccessController(sctx.lg, s, httpmux),
+ ErrorLog: logger, // do not log user error
+ }
+ httpl := m.Match(cmux.HTTP1())
+ go func() { errHandler(srvhttp.Serve(httpl)) }()
+
+ sctx.serversC <- &servers{grpc: gs, http: srvhttp}
+ if sctx.lg != nil {
+ sctx.lg.Info(
+ "serving client traffic insecurely; this is strongly discouraged!",
+ zap.String("address", sctx.l.Addr().String()),
+ )
+ } else {
+ plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String())
+ }
+ }
+
+ if sctx.secure {
+ tlscfg, tlsErr := tlsinfo.ServerConfig()
+ if tlsErr != nil {
+ return tlsErr
+ }
+ gs = v3rpc.Server(s, tlscfg, gopts...)
+ v3electionpb.RegisterElectionServer(gs, servElection)
+ v3lockpb.RegisterLockServer(gs, servLock)
+ if sctx.serviceRegister != nil {
+ sctx.serviceRegister(gs)
+ }
+ handler = grpcHandlerFunc(gs, handler)
+
+ var gwmux *gw.ServeMux
+ if s.Cfg.EnableGRPCGateway {
+ dtls := tlscfg.Clone()
+ // trust local server
+ dtls.InsecureSkipVerify = true
+ bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls})
+ opts := []grpc.DialOption{grpc.WithTransportCredentials(bundle.TransportCredentials())}
+ gwmux, err = sctx.registerGateway(opts)
+ if err != nil {
+ return err
+ }
+ }
+
+ var tlsl net.Listener
+ tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
+ if err != nil {
+ return err
+ }
+ // TODO: add debug flag; enable logging when debug flag is set
+ httpmux := sctx.createMux(gwmux, handler)
+
+ srv := &http.Server{
+ Handler: createAccessController(sctx.lg, s, httpmux),
+ TLSConfig: tlscfg,
+ ErrorLog: logger, // do not log user error
+ }
+ go func() { errHandler(srv.Serve(tlsl)) }()
+
+ sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
+ if sctx.lg != nil {
+ sctx.lg.Info(
+ "serving client traffic securely",
+ zap.String("address", sctx.l.Addr().String()),
+ )
+ } else {
+ plog.Infof("serving client requests on %s", sctx.l.Addr().String())
+ }
+ }
+
+ close(sctx.serversC)
+ return m.Serve()
+}
+
+// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC
+// connections or otherHandler otherwise. Given in gRPC docs.
+func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler {
+ if otherHandler == nil {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ grpcServer.ServeHTTP(w, r)
+ })
+ }
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
+ grpcServer.ServeHTTP(w, r)
+ } else {
+ otherHandler.ServeHTTP(w, r)
+ }
+ })
+}
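+
+// For example (illustrative): a gRPC-gateway JSON call such as
+//
+// curl -L http://127.0.0.1:2379/v3/kv/range -X POST -d '{"key": "Zm9v"}'
+//
+// arrives over HTTP/1.1 and is served by otherHandler, while native gRPC
+// clients speak HTTP/2 with Content-Type "application/grpc" and reach
+// grpcServer.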
+
+type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
+
+func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) {
+ ctx := sctx.ctx
+
+ addr := sctx.addr
+ if network := sctx.network; network == "unix" {
+ // explicitly define unix network for gRPC socket support
+ addr = fmt.Sprintf("%s://%s", network, addr)
+ }
+
+ conn, err := grpc.DialContext(ctx, addr, opts...)
+ if err != nil {
+ return nil, err
+ }
+ gwmux := gw.NewServeMux()
+
+ handlers := []registerHandlerFunc{
+ etcdservergw.RegisterKVHandler,
+ etcdservergw.RegisterWatchHandler,
+ etcdservergw.RegisterLeaseHandler,
+ etcdservergw.RegisterClusterHandler,
+ etcdservergw.RegisterMaintenanceHandler,
+ etcdservergw.RegisterAuthHandler,
+ v3lockgw.RegisterLockHandler,
+ v3electiongw.RegisterElectionHandler,
+ }
+ for _, h := range handlers {
+ if err := h(ctx, gwmux, conn); err != nil {
+ return nil, err
+ }
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ if sctx.lg != nil {
+ sctx.lg.Warn(
+ "failed to close connection",
+ zap.String("address", sctx.l.Addr().String()),
+ zap.Error(cerr),
+ )
+ } else {
+ plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr)
+ }
+ }
+ }()
+
+ return gwmux, nil
+}
+
+func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux {
+ httpmux := http.NewServeMux()
+ for path, h := range sctx.userHandlers {
+ httpmux.Handle(path, h)
+ }
+
+ if gwmux != nil {
+ httpmux.Handle(
+ "/v3/",
+ wsproxy.WebsocketProxy(
+ gwmux,
+ wsproxy.WithRequestMutator(
+ // Default to the POST method for streams
+ func(_ *http.Request, outgoing *http.Request) *http.Request {
+ outgoing.Method = "POST"
+ return outgoing
+ },
+ ),
+ ),
+ )
+ }
+ if handler != nil {
+ httpmux.Handle("/", handler)
+ }
+ return httpmux
+}
+
+// createAccessController wraps the HTTP multiplexer to
+// - mutate gRPC gateway request paths
+// - check the hostname whitelist
+// Client HTTP requests go through here first.
+func createAccessController(lg *zap.Logger, s *etcdserver.EtcdServer, mux *http.ServeMux) http.Handler {
+ return &accessController{lg: lg, s: s, mux: mux}
+}
+
+type accessController struct {
+ lg *zap.Logger
+ s *etcdserver.EtcdServer
+ mux *http.ServeMux
+}
+
+func (ac *accessController) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ // rewrite legacy request paths for backward compatibility
+ if req != nil && req.URL != nil && strings.HasPrefix(req.URL.Path, "/v3beta/") {
+ req.URL.Path = strings.Replace(req.URL.Path, "/v3beta/", "/v3/", 1)
+ }
+
+ if req.TLS == nil { // check origin if client connection is not secure
+ host := httputil.GetHostname(req)
+ if !ac.s.AccessController.IsHostWhitelisted(host) {
+ if ac.lg != nil {
+ ac.lg.Warn(
+ "rejecting HTTP request to prevent DNS rebinding attacks",
+ zap.String("host", host),
+ )
+ } else {
+ plog.Warningf("rejecting HTTP request from %q to prevent DNS rebinding attacks", host)
+ }
+ // TODO: use Go's "http.StatusMisdirectedRequest" (421)
+ // https://github.com/golang/go/commit/4b8a7eafef039af1834ef9bfa879257c4a72b7b5
+ http.Error(rw, errCVE20185702(host), 421)
+ return
+ }
+ } else if ac.s.Cfg.ClientCertAuthEnabled && ac.s.Cfg.EnableGRPCGateway &&
+ ac.s.AuthStore().IsAuthEnabled() && strings.HasPrefix(req.URL.Path, "/v3/") {
+ for _, chains := range req.TLS.VerifiedChains {
+ if len(chains) < 1 {
+ continue
+ }
+ if len(chains[0].Subject.CommonName) != 0 {
+ http.Error(rw, "CommonName of client sending a request against gateway will be ignored and not used as expected", 400)
+ return
+ }
+ }
+ }
+
+ // Write CORS header.
+ if ac.s.AccessController.OriginAllowed("*") {
+ addCORSHeader(rw, "*")
+ } else if origin := req.Header.Get("Origin"); ac.s.OriginAllowed(origin) {
+ addCORSHeader(rw, origin)
+ }
+
+ if req.Method == "OPTIONS" {
+ rw.WriteHeader(http.StatusOK)
+ return
+ }
+
+ ac.mux.ServeHTTP(rw, req)
+}
+
+// addCORSHeader adds the correct cors headers given an origin
+func addCORSHeader(w http.ResponseWriter, origin string) {
+ w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
+ w.Header().Add("Access-Control-Allow-Origin", origin)
+ w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization")
+}
+
+// https://github.com/transmission/transmission/pull/468
+func errCVE20185702(host string) string {
+ return fmt.Sprintf(`
+etcd received your request, but the Host header was unrecognized.
+
+To fix this, choose one of the following options:
+- Enable TLS, then any HTTPS request will be allowed.
+- Add the hostname you want to use to the whitelist in settings.
+ - e.g. etcd --host-whitelist %q
+
+This requirement has been added to help prevent "DNS Rebinding" attacks (CVE-2018-5702).
+`, host)
+}
+
+// WrapCORS wraps an existing handler with CORS handling.
+// TODO: deprecate this once the v2 proxy is deprecated
+func WrapCORS(cors map[string]struct{}, h http.Handler) http.Handler {
+ return &corsHandler{
+ ac: &etcdserver.AccessController{CORS: cors},
+ h: h,
+ }
+}
+
+type corsHandler struct {
+ ac *etcdserver.AccessController
+ h http.Handler
+}
+
+func (ch *corsHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if ch.ac.OriginAllowed("*") {
+ addCORSHeader(rw, "*")
+ } else if origin := req.Header.Get("Origin"); ch.ac.OriginAllowed(origin) {
+ addCORSHeader(rw, origin)
+ }
+
+ if req.Method == "OPTIONS" {
+ rw.WriteHeader(http.StatusOK)
+ return
+ }
+
+ ch.h.ServeHTTP(rw, req)
+}
+
+func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) {
+ if sctx.userHandlers[s] != nil {
+ if sctx.lg != nil {
+ sctx.lg.Warn("path is already registered by user handler", zap.String("path", s))
+ } else {
+ plog.Warningf("path %s already registered by user handler", s)
+ }
+ return
+ }
+ sctx.userHandlers[s] = h
+}
+
+func (sctx *serveCtx) registerPprof() {
+ for p, h := range debugutil.PProfHandlers() {
+ sctx.registerUserHandler(p, h)
+ }
+}
+
+func (sctx *serveCtx) registerTrace() {
+ reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) }
+ sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf))
+ evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) }
+ sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf))
+}
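
For context on the pattern above: the access-controlled handler and the standalone corsHandler apply the same three-step CORS flow, namely resolve the allowed origin (wildcard first, then an exact match on the Origin header), answer OPTIONS preflights directly, and only then delegate to the wrapped handler. Below is a minimal self-contained sketch of that flow; the allowed-origin set and the demo handler are illustrative assumptions, not part of the patch.

```go
package main

import (
	"fmt"
	"net/http"
)

type corsWrapper struct {
	allowed map[string]struct{} // origins permitted to make cross-site calls
	next    http.Handler
}

func (c *corsWrapper) originAllowed(origin string) bool {
	_, ok := c.allowed[origin]
	return ok
}

func (c *corsWrapper) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Wildcard wins; otherwise echo back a known origin, as addCORSHeader does.
	if c.originAllowed("*") {
		w.Header().Add("Access-Control-Allow-Origin", "*")
	} else if origin := r.Header.Get("Origin"); c.originAllowed(origin) {
		w.Header().Add("Access-Control-Allow-Origin", origin)
	}
	// Preflight requests are answered directly and never forwarded.
	if r.Method == http.MethodOptions {
		w.WriteHeader(http.StatusOK)
		return
	}
	c.next.ServeHTTP(w, r)
}

func main() {
	h := &corsWrapper{
		allowed: map[string]struct{}{"https://example.com": {}},
		next: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "ok")
		}),
	}
	http.ListenAndServe("127.0.0.1:8080", h)
}
```
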
diff --git a/vendor/go.etcd.io/etcd/embed/util.go b/vendor/go.etcd.io/etcd/embed/util.go
new file mode 100644
index 000000000000..40f3ce9d5955
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/embed/util.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "path/filepath"
+
+ "go.etcd.io/etcd/wal"
+)
+
+func isMemberInitialized(cfg *Config) bool {
+ waldir := cfg.WalDir
+ if waldir == "" {
+ waldir = filepath.Join(cfg.Dir, "member", "wal")
+ }
+ return wal.Exist(waldir)
+}
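
isMemberInitialized decides between a fresh start and a restart by probing for an existing write-ahead log. A rough standalone sketch of the same resolution follows, approximating wal.Exist as "the resolved directory contains at least one *.wal file"; the temp-dir demo is an assumption for illustration.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// walDir mirrors the fallback above: an explicit wal-dir wins, otherwise the
// conventional <data-dir>/member/wal path is probed.
func walDir(dataDir, explicitWalDir string) string {
	if explicitWalDir != "" {
		return explicitWalDir
	}
	return filepath.Join(dataDir, "member", "wal")
}

// memberInitialized approximates wal.Exist: the member counts as initialized
// once at least one *.wal file is present in the resolved directory.
func memberInitialized(dataDir, explicitWalDir string) bool {
	names, err := filepath.Glob(filepath.Join(walDir(dataDir, explicitWalDir), "*.wal"))
	return err == nil && len(names) > 0
}

func main() {
	dir, err := os.MkdirTemp("", "etcd-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	fmt.Println(memberInitialized(dir, "")) // false: no WAL written yet
}
```
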
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/capability.go b/vendor/go.etcd.io/etcd/etcdserver/api/capability.go
new file mode 100644
index 000000000000..8b13f4742950
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/capability.go
@@ -0,0 +1,98 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+ "sync"
+
+ "go.etcd.io/etcd/version"
+ "go.uber.org/zap"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/coreos/pkg/capnslog"
+)
+
+type Capability string
+
+const (
+ AuthCapability Capability = "auth"
+ V3rpcCapability Capability = "v3rpc"
+)
+
+var (
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver/api")
+
+ // capabilityMaps is a static map of version to capability map.
+ capabilityMaps = map[string]map[Capability]bool{
+ "3.0.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.1.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.2.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.3.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.4.0": {AuthCapability: true, V3rpcCapability: true},
+ }
+
+ enableMapMu sync.RWMutex
+ // enabledMap points to a map in capabilityMaps
+ enabledMap map[Capability]bool
+
+ curVersion *semver.Version
+)
+
+func init() {
+ enabledMap = map[Capability]bool{
+ AuthCapability: true,
+ V3rpcCapability: true,
+ }
+}
+
+// UpdateCapability updates the enabledMap when the cluster version increases.
+func UpdateCapability(lg *zap.Logger, v *semver.Version) {
+ if v == nil {
+ // if recovered but version was never set by cluster
+ return
+ }
+ enableMapMu.Lock()
+ if curVersion != nil && !curVersion.LessThan(*v) {
+ enableMapMu.Unlock()
+ return
+ }
+ curVersion = v
+ enabledMap = capabilityMaps[curVersion.String()]
+ enableMapMu.Unlock()
+
+ if lg != nil {
+ lg.Info(
+ "enabled capabilities for version",
+ zap.String("cluster-version", version.Cluster(v.String())),
+ )
+ } else {
+ plog.Infof("enabled capabilities for version %s", version.Cluster(v.String()))
+ }
+}
+
+func IsCapabilityEnabled(c Capability) bool {
+ enableMapMu.RLock()
+ defer enableMapMu.RUnlock()
+ if enabledMap == nil {
+ return false
+ }
+ return enabledMap[c]
+}
+
+func EnableCapability(c Capability) {
+ enableMapMu.Lock()
+ defer enableMapMu.Unlock()
+ enabledMap[c] = true
+}
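
The capability gate above is a version-keyed lookup table guarded by a RWMutex that only ever moves forward: once the cluster version has been decided, a lower version can never shrink the enabled set. A condensed sketch of the same mechanism, with names abbreviated for illustration:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/coreos/go-semver/semver"
)

type capability string

const authCap capability = "auth"

var (
	mu      sync.RWMutex
	cur     *semver.Version
	enabled = map[capability]bool{}
	table   = map[string]map[capability]bool{
		"3.4.0": {authCap: true},
	}
)

func update(v *semver.Version) {
	mu.Lock()
	defer mu.Unlock()
	if cur != nil && !cur.LessThan(*v) {
		return // never move the enabled set backwards
	}
	cur = v
	enabled = table[v.String()]
}

func isEnabled(c capability) bool {
	mu.RLock()
	defer mu.RUnlock()
	return enabled[c] // a nil map reads as false for every capability
}

func main() {
	fmt.Println(isEnabled(authCap)) // false until a version is decided
	update(semver.Must(semver.NewVersion("3.4.0")))
	fmt.Println(isEnabled(authCap)) // true
}
```
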
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/cluster.go b/vendor/go.etcd.io/etcd/etcdserver/api/cluster.go
new file mode 100644
index 000000000000..901be9d85ca7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/cluster.go
@@ -0,0 +1,38 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+ "go.etcd.io/etcd/etcdserver/api/membership"
+ "go.etcd.io/etcd/pkg/types"
+
+ "github.com/coreos/go-semver/semver"
+)
+
+// Cluster is an interface representing a collection of members in one etcd cluster.
+type Cluster interface {
+ // ID returns the cluster ID
+ ID() types.ID
+ // ClientURLs returns an aggregate set of all URLs on which this
+ // cluster is listening for client requests
+ ClientURLs() []string
+ // Members returns a slice of members sorted by their ID
+ Members() []*membership.Member
+ // Member retrieves a particular member based on ID, or nil if the
+ // member does not exist in the cluster
+ Member(id types.ID) *membership.Member
+ // Version is the cluster-wide minimum major.minor version.
+ Version() *semver.Version
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/doc.go
new file mode 100644
index 000000000000..f44881be663e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package api manages the capabilities and features that are exposed to clients by the etcd cluster.
+package api
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/base.go b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/base.go
new file mode 100644
index 000000000000..c9df62ea8e60
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/base.go
@@ -0,0 +1,203 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ "encoding/json"
+ "expvar"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api"
+ "go.etcd.io/etcd/etcdserver/api/v2error"
+ "go.etcd.io/etcd/etcdserver/api/v2http/httptypes"
+ "go.etcd.io/etcd/pkg/logutil"
+ "go.etcd.io/etcd/version"
+
+ "github.com/coreos/pkg/capnslog"
+ "go.uber.org/zap"
+)
+
+var (
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver/api/etcdhttp")
+ mlog = logutil.NewMergeLogger(plog)
+)
+
+const (
+ configPath = "/config"
+ varsPath = "/debug/vars"
+ versionPath = "/version"
+)
+
+// HandleBasic adds handlers to a mux for serving JSON etcd client requests
+// that do not access the v2 store.
+func HandleBasic(mux *http.ServeMux, server etcdserver.ServerPeer) {
+ mux.HandleFunc(varsPath, serveVars)
+
+ // TODO: deprecate '/config/local/log' in v3.5
+ mux.HandleFunc(configPath+"/local/log", logHandleFunc)
+
+ HandleMetricsHealth(mux, server)
+ mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
+}
+
+func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ v := c.Version()
+ if v != nil {
+ fn(w, r, v.String())
+ } else {
+ fn(w, r, "not_decided")
+ }
+ }
+}
+
+func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) {
+ if !allowMethod(w, r, "GET") {
+ return
+ }
+ vs := version.Versions{
+ Server: version.Version,
+ Cluster: clusterV,
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ b, err := json.Marshal(&vs)
+ if err != nil {
+ plog.Panicf("cannot marshal versions to json (%v)", err)
+ }
+ w.Write(b)
+}
+
+// TODO: deprecate '/config/local/log' in v3.5
+func logHandleFunc(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r, "PUT") {
+ return
+ }
+
+ in := struct{ Level string }{}
+
+ d := json.NewDecoder(r.Body)
+ if err := d.Decode(&in); err != nil {
+ WriteError(nil, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body"))
+ return
+ }
+
+ logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level))
+ if err != nil {
+ WriteError(nil, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level))
+ return
+ }
+
+ plog.Noticef("globalLogLevel set to %q", logl.String())
+ capnslog.SetGlobalLogLevel(logl)
+ w.WriteHeader(http.StatusNoContent)
+}
+
+func serveVars(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r, "GET") {
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ fmt.Fprintf(w, "{\n")
+ first := true
+ expvar.Do(func(kv expvar.KeyValue) {
+ if !first {
+ fmt.Fprintf(w, ",\n")
+ }
+ first = false
+ fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+ })
+ fmt.Fprintf(w, "\n}\n")
+}
+
+func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool {
+ if m == r.Method {
+ return true
+ }
+ w.Header().Set("Allow", m)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return false
+}
+
+// WriteError logs and writes the given Error to the ResponseWriter.
+// If the error is a *v2error.Error or an *httptypes.HTTPError, it is rendered
+// to the ResponseWriter as-is; anything else is reported as a generic
+// StatusInternalServerError.
+func WriteError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) {
+ if err == nil {
+ return
+ }
+ switch e := err.(type) {
+ case *v2error.Error:
+ e.WriteTo(w)
+
+ case *httptypes.HTTPError:
+ if et := e.WriteTo(w); et != nil {
+ if lg != nil {
+ lg.Debug(
+ "failed to write v2 HTTP error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-server-error", e.Error()),
+ zap.Error(et),
+ )
+ } else {
+ plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
+ }
+ }
+
+ default:
+ switch err {
+ case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers,
+ etcdserver.ErrUnhealthy:
+ if lg != nil {
+ lg.Warn(
+ "v2 response error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-server-error", err.Error()),
+ )
+ } else {
+ mlog.MergeError(err)
+ }
+
+ default:
+ if lg != nil {
+ lg.Warn(
+ "unexpected v2 response error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-server-error", err.Error()),
+ )
+ } else {
+ mlog.MergeErrorf("got unexpected response error (%v)", err)
+ }
+ }
+
+ herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
+ if et := herr.WriteTo(w); et != nil {
+ if lg != nil {
+ lg.Debug(
+ "failed to write v2 HTTP error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-server-error", err.Error()),
+ zap.Error(et),
+ )
+ } else {
+ plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
+ }
+ }
+ }
+}
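
serveVars above hand-rolls the same JSON document the standard library's expvar package produces. A small sketch of the equivalent behavior: importing expvar registers /debug/vars on the default mux automatically, so published variables show up without any manual encoding. The counter name is an illustrative assumption.

```go
package main

import (
	"expvar" // importing expvar registers /debug/vars on the default mux
	"net/http"
)

var requests = expvar.NewInt("demo_requests")

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		requests.Add(1)
		w.Write([]byte("hello\n"))
	})
	// GET /debug/vars now returns a JSON document such as
	// {"demo_requests": 3, "cmdline": [...], "memstats": {...}},
	// the same shape serveVars renders by hand above.
	http.ListenAndServe("127.0.0.1:8080", nil)
}
```
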
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/doc.go
new file mode 100644
index 000000000000..a03b626204f7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package etcdhttp implements the HTTP transportation layer for etcdserver.
+package etcdhttp
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/metrics.go
new file mode 100644
index 000000000000..07ec8ec3bd96
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/metrics.go
@@ -0,0 +1,130 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/raft"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+const (
+ PathMetrics = "/metrics"
+ PathHealth = "/health"
+)
+
+// HandleMetricsHealth registers metrics and health handlers.
+func HandleMetricsHealth(mux *http.ServeMux, srv etcdserver.ServerV2) {
+ mux.Handle(PathMetrics, promhttp.Handler())
+ mux.Handle(PathHealth, NewHealthHandler(func() Health { return checkHealth(srv) }))
+}
+
+// HandlePrometheus registers prometheus handler on '/metrics'.
+func HandlePrometheus(mux *http.ServeMux) {
+ mux.Handle(PathMetrics, promhttp.Handler())
+}
+
+// NewHealthHandler returns an http.HandlerFunc that serves '/health' requests.
+func NewHealthHandler(hfunc func() Health) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ w.Header().Set("Allow", http.MethodGet)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ plog.Warningf("/health error (status code %d)", http.StatusMethodNotAllowed)
+ return
+ }
+ h := hfunc()
+ d, _ := json.Marshal(h)
+ if h.Health != "true" {
+ http.Error(w, string(d), http.StatusServiceUnavailable)
+ return
+ }
+ w.WriteHeader(http.StatusOK)
+ w.Write(d)
+ }
+}
+
+var (
+ healthSuccess = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "health_success",
+ Help: "The total number of successful health checks",
+ })
+ healthFailed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "health_failures",
+ Help: "The total number of failed health checks",
+ })
+)
+
+func init() {
+ prometheus.MustRegister(healthSuccess)
+ prometheus.MustRegister(healthFailed)
+}
+
+// Health defines etcd server health status.
+// TODO: remove manual parsing in etcdctl cluster-health
+type Health struct {
+ Health string `json:"health"`
+}
+
+// TODO: server NOSPACE, etcdserver.ErrNoLeader in health API
+
+func checkHealth(srv etcdserver.ServerV2) Health {
+ h := Health{Health: "true"}
+
+ as := srv.Alarms()
+ if len(as) > 0 {
+ h.Health = "false"
+ for _, v := range as {
+ plog.Warningf("/health error due to an alarm %s", v.String())
+ }
+ }
+
+ if h.Health == "true" {
+ if uint64(srv.Leader()) == raft.None {
+ h.Health = "false"
+ plog.Warningf("/health error; no leader (status code %d)", http.StatusServiceUnavailable)
+ }
+ }
+
+ if h.Health == "true" {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ _, err := srv.Do(ctx, etcdserverpb.Request{Method: "QGET"})
+ cancel()
+ if err != nil {
+ h.Health = "false"
+ plog.Warningf("/health error; QGET failed %v (status code %d)", err, http.StatusServiceUnavailable)
+ }
+ }
+
+ if h.Health == "true" {
+ healthSuccess.Inc()
+ plog.Infof("/health OK (status code %d)", http.StatusOK)
+ } else {
+ healthFailed.Inc()
+ }
+ return h
+}
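
The '/health' contract implemented above is: HTTP 200 with body {"health":"true"} when alarms are clear, a leader exists, and the QGET probe succeeds; otherwise HTTP 503 with the same JSON shape. A small client sketch against that contract; the endpoint URL is an assumption.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type health struct {
	Health string `json:"health"`
}

func main() {
	resp, err := http.Get("http://127.0.0.1:2379/health")
	if err != nil {
		fmt.Println("unreachable:", err)
		return
	}
	defer resp.Body.Close()

	var h health
	if err := json.NewDecoder(resp.Body).Decode(&h); err != nil {
		fmt.Println("bad body:", err)
		return
	}
	// 200 + "true" means alarms are clear, a leader exists, and QGET succeeded.
	fmt.Printf("status=%d healthy=%s\n", resp.StatusCode, h.Health)
}
```
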
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/peer.go b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/peer.go
new file mode 100644
index 000000000000..2d13741c68bd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/etcdhttp/peer.go
@@ -0,0 +1,168 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api"
+ "go.etcd.io/etcd/etcdserver/api/membership"
+ "go.etcd.io/etcd/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/lease/leasehttp"
+ "go.etcd.io/etcd/pkg/types"
+
+ "go.uber.org/zap"
+)
+
+const (
+ peerMembersPath = "/members"
+ peerMemberPromotePrefix = "/members/promote/"
+)
+
+// NewPeerHandler generates an http.Handler to handle etcd peer requests.
+func NewPeerHandler(lg *zap.Logger, s etcdserver.ServerPeerV2) http.Handler {
+ return newPeerHandler(lg, s, s.RaftHandler(), s.LeaseHandler(), s.HashKVHandler())
+}
+
+func newPeerHandler(
+ lg *zap.Logger,
+ s etcdserver.Server,
+ raftHandler http.Handler,
+ leaseHandler http.Handler,
+ hashKVHandler http.Handler,
+) http.Handler {
+ peerMembersHandler := newPeerMembersHandler(lg, s.Cluster())
+ peerMemberPromoteHandler := newPeerMemberPromoteHandler(lg, s)
+
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", http.NotFound)
+ mux.Handle(rafthttp.RaftPrefix, raftHandler)
+ mux.Handle(rafthttp.RaftPrefix+"/", raftHandler)
+ mux.Handle(peerMembersPath, peerMembersHandler)
+ mux.Handle(peerMemberPromotePrefix, peerMemberPromoteHandler)
+ if leaseHandler != nil {
+ mux.Handle(leasehttp.LeasePrefix, leaseHandler)
+ mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler)
+ }
+ if hashKVHandler != nil {
+ mux.Handle(etcdserver.PeerHashKVPath, hashKVHandler)
+ }
+ mux.HandleFunc(versionPath, versionHandler(s.Cluster(), serveVersion))
+ return mux
+}
+
+func newPeerMembersHandler(lg *zap.Logger, cluster api.Cluster) http.Handler {
+ return &peerMembersHandler{
+ lg: lg,
+ cluster: cluster,
+ }
+}
+
+type peerMembersHandler struct {
+ lg *zap.Logger
+ cluster api.Cluster
+}
+
+func newPeerMemberPromoteHandler(lg *zap.Logger, s etcdserver.Server) http.Handler {
+ return &peerMemberPromoteHandler{
+ lg: lg,
+ cluster: s.Cluster(),
+ server: s,
+ }
+}
+
+type peerMemberPromoteHandler struct {
+ lg *zap.Logger
+ cluster api.Cluster
+ server etcdserver.Server
+}
+
+func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r, "GET") {
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+ if r.URL.Path != peerMembersPath {
+ http.Error(w, "bad path", http.StatusBadRequest)
+ return
+ }
+ ms := h.cluster.Members()
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(ms); err != nil {
+ if h.lg != nil {
+ h.lg.Warn("failed to encode membership members", zap.Error(err))
+ } else {
+ plog.Warningf("failed to encode members response (%v)", err)
+ }
+ }
+}
+
+func (h *peerMemberPromoteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r, "POST") {
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+ if !strings.HasPrefix(r.URL.Path, peerMemberPromotePrefix) {
+ http.Error(w, "bad path", http.StatusBadRequest)
+ return
+ }
+ idStr := strings.TrimPrefix(r.URL.Path, peerMemberPromotePrefix)
+ id, err := strconv.ParseUint(idStr, 10, 64)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("member %s not found in cluster", idStr), http.StatusNotFound)
+ return
+ }
+
+ resp, err := h.server.PromoteMember(r.Context(), id)
+ if err != nil {
+ switch err {
+ case membership.ErrIDNotFound:
+ http.Error(w, err.Error(), http.StatusNotFound)
+ case membership.ErrMemberNotLearner:
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ case etcdserver.ErrLearnerNotReady:
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ default:
+ WriteError(h.lg, w, r, err)
+ }
+ if h.lg != nil {
+ h.lg.Warn(
+ "failed to promote a member",
+ zap.String("member-id", types.ID(id).String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("error promoting member %s (%v)", types.ID(id).String(), err)
+ }
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ if err := json.NewEncoder(w).Encode(resp); err != nil {
+ if h.lg != nil {
+ h.lg.Warn("failed to encode members response", zap.Error(err))
+ } else {
+ plog.Warningf("failed to encode members response (%v)", err)
+ }
+ }
+}
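
Worth noting in the promote handler above: the member ID segment of the URL is parsed with strconv.ParseUint in base 10, so callers must send the decimal form of the ID rather than the hex form member IDs are usually printed in, and a failed promotion maps ErrIDNotFound to 404 and the two not-ready conditions to 412. A hypothetical client sketch; the peer URL and member ID are assumptions.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func promote(peerURL string, id uint64) error {
	url := fmt.Sprintf("%s/members/promote/%d", peerURL, id) // decimal ID, per ParseUint base 10
	resp, err := http.Post(url, "application/json", nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)

	switch resp.StatusCode {
	case http.StatusOK:
		return nil // the learner is now a voting member
	case http.StatusNotFound: // membership.ErrIDNotFound
		return fmt.Errorf("member %d not found: %s", id, body)
	case http.StatusPreconditionFailed: // not a learner, or learner not caught up yet
		return fmt.Errorf("cannot promote member %d yet: %s", id, body)
	default:
		return fmt.Errorf("promote failed: %d %s", resp.StatusCode, body)
	}
}

func main() {
	if err := promote("http://127.0.0.1:2380", 12345); err != nil {
		fmt.Println(err)
	}
}
```
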
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/membership/cluster.go b/vendor/go.etcd.io/etcd/etcdserver/api/membership/cluster.go
new file mode 100644
index 000000000000..d1cf220dd691
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/membership/cluster.go
@@ -0,0 +1,840 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha1"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "path"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/v2store"
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.etcd.io/etcd/pkg/netutil"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/raft"
+ "go.etcd.io/etcd/raft/raftpb"
+ "go.etcd.io/etcd/version"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/zap"
+)
+
+const maxLearners = 1
+
+// RaftCluster is a list of Members that belong to the same raft cluster
+type RaftCluster struct {
+ lg *zap.Logger
+
+ localID types.ID
+ cid types.ID
+ token string
+
+ v2store v2store.Store
+ be backend.Backend
+
+ sync.Mutex // guards the fields below
+ version *semver.Version
+ members map[types.ID]*Member
+ // removed contains the ids of removed members in the cluster.
+ // removed id cannot be reused.
+ removed map[types.ID]bool
+}
+
+// ConfigChangeContext represents a context for confChange.
+type ConfigChangeContext struct {
+ Member
+ // IsPromote indicates if the config change is for promoting a learner member.
+ // This flag is needed because both adding a new member and promoting a learner member
+	// use the same config change type 'ConfChangeAddNode'.
+ IsPromote bool `json:"isPromote"`
+}
+
+// NewClusterFromURLsMap creates a new raft cluster using provided urls map. Currently, it does not support creating
+// cluster with raft learner member.
+func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap) (*RaftCluster, error) {
+ c := NewCluster(lg, token)
+ for name, urls := range urlsmap {
+ m := NewMember(name, urls, token, nil)
+ if _, ok := c.members[m.ID]; ok {
+ return nil, fmt.Errorf("member exists with identical ID %v", m)
+ }
+ if uint64(m.ID) == raft.None {
+ return nil, fmt.Errorf("cannot use %x as member id", raft.None)
+ }
+ c.members[m.ID] = m
+ }
+ c.genID()
+ return c, nil
+}
+
+func NewClusterFromMembers(lg *zap.Logger, token string, id types.ID, membs []*Member) *RaftCluster {
+ c := NewCluster(lg, token)
+ c.cid = id
+ for _, m := range membs {
+ c.members[m.ID] = m
+ }
+ return c
+}
+
+func NewCluster(lg *zap.Logger, token string) *RaftCluster {
+ return &RaftCluster{
+ lg: lg,
+ token: token,
+ members: make(map[types.ID]*Member),
+ removed: make(map[types.ID]bool),
+ }
+}
+
+func (c *RaftCluster) ID() types.ID { return c.cid }
+
+func (c *RaftCluster) Members() []*Member {
+ c.Lock()
+ defer c.Unlock()
+ var ms MembersByID
+ for _, m := range c.members {
+ ms = append(ms, m.Clone())
+ }
+ sort.Sort(ms)
+ return []*Member(ms)
+}
+
+func (c *RaftCluster) Member(id types.ID) *Member {
+ c.Lock()
+ defer c.Unlock()
+ return c.members[id].Clone()
+}
+
+func (c *RaftCluster) VotingMembers() []*Member {
+ c.Lock()
+ defer c.Unlock()
+ var ms MembersByID
+ for _, m := range c.members {
+ if !m.IsLearner {
+ ms = append(ms, m.Clone())
+ }
+ }
+ sort.Sort(ms)
+ return []*Member(ms)
+}
+
+// MemberByName returns a Member with the given name if it exists.
+// If more than one member has the given name, it will panic.
+func (c *RaftCluster) MemberByName(name string) *Member {
+ c.Lock()
+ defer c.Unlock()
+ var memb *Member
+ for _, m := range c.members {
+ if m.Name == name {
+ if memb != nil {
+ if c.lg != nil {
+				c.lg.Panic("two members with same name found", zap.String("name", name))
+ } else {
+ plog.Panicf("two members with the given name %q exist", name)
+ }
+ }
+ memb = m
+ }
+ }
+ return memb.Clone()
+}
+
+func (c *RaftCluster) MemberIDs() []types.ID {
+ c.Lock()
+ defer c.Unlock()
+ var ids []types.ID
+ for _, m := range c.members {
+ ids = append(ids, m.ID)
+ }
+ sort.Sort(types.IDSlice(ids))
+ return ids
+}
+
+func (c *RaftCluster) IsIDRemoved(id types.ID) bool {
+ c.Lock()
+ defer c.Unlock()
+ return c.removed[id]
+}
+
+// PeerURLs returns a list of all peer addresses.
+// The returned list is sorted in ascending lexicographical order.
+func (c *RaftCluster) PeerURLs() []string {
+ c.Lock()
+ defer c.Unlock()
+ urls := make([]string, 0)
+ for _, p := range c.members {
+ urls = append(urls, p.PeerURLs...)
+ }
+ sort.Strings(urls)
+ return urls
+}
+
+// ClientURLs returns a list of all client addresses.
+// The returned list is sorted in ascending lexicographical order.
+func (c *RaftCluster) ClientURLs() []string {
+ c.Lock()
+ defer c.Unlock()
+ urls := make([]string, 0)
+ for _, p := range c.members {
+ urls = append(urls, p.ClientURLs...)
+ }
+ sort.Strings(urls)
+ return urls
+}
+
+func (c *RaftCluster) String() string {
+ c.Lock()
+ defer c.Unlock()
+ b := &bytes.Buffer{}
+ fmt.Fprintf(b, "{ClusterID:%s ", c.cid)
+ var ms []string
+ for _, m := range c.members {
+ ms = append(ms, fmt.Sprintf("%+v", m))
+ }
+ fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " "))
+ var ids []string
+ for id := range c.removed {
+ ids = append(ids, id.String())
+ }
+ fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " "))
+ return b.String()
+}
+
+func (c *RaftCluster) genID() {
+ mIDs := c.MemberIDs()
+ b := make([]byte, 8*len(mIDs))
+ for i, id := range mIDs {
+ binary.BigEndian.PutUint64(b[8*i:], uint64(id))
+ }
+ hash := sha1.Sum(b)
+ c.cid = types.ID(binary.BigEndian.Uint64(hash[:8]))
+}
+
+func (c *RaftCluster) SetID(localID, cid types.ID) {
+ c.localID = localID
+ c.cid = cid
+}
+
+func (c *RaftCluster) SetStore(st v2store.Store) { c.v2store = st }
+
+func (c *RaftCluster) SetBackend(be backend.Backend) {
+ c.be = be
+ mustCreateBackendBuckets(c.be)
+}
+
+func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) {
+ c.Lock()
+ defer c.Unlock()
+
+ c.members, c.removed = membersFromStore(c.lg, c.v2store)
+ c.version = clusterVersionFromStore(c.lg, c.v2store)
+ mustDetectDowngrade(c.lg, c.version)
+ onSet(c.lg, c.version)
+
+ for _, m := range c.members {
+ if c.lg != nil {
+ c.lg.Info(
+ "recovered/added member from store",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("recovered-remote-peer-id", m.ID.String()),
+ zap.Strings("recovered-remote-peer-urls", m.PeerURLs),
+ )
+ } else {
+ plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.cid)
+ }
+ }
+ if c.version != nil {
+ if c.lg != nil {
+ c.lg.Info(
+ "set cluster version from store",
+ zap.String("cluster-version", version.Cluster(c.version.String())),
+ )
+ } else {
+ plog.Infof("set the cluster version to %v from store", version.Cluster(c.version.String()))
+ }
+ }
+}
+
+// ValidateConfigurationChange takes a proposed ConfChange and
+// ensures that it is still valid.
+func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
+ members, removed := membersFromStore(c.lg, c.v2store)
+ id := types.ID(cc.NodeID)
+ if removed[id] {
+ return ErrIDRemoved
+ }
+ switch cc.Type {
+ case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode:
+ confChangeContext := new(ConfigChangeContext)
+ if err := json.Unmarshal(cc.Context, confChangeContext); err != nil {
+ if c.lg != nil {
+ c.lg.Panic("failed to unmarshal confChangeContext", zap.Error(err))
+ } else {
+ plog.Panicf("unmarshal confChangeContext should never fail: %v", err)
+ }
+ }
+
+ if confChangeContext.IsPromote { // promoting a learner member to voting member
+ if members[id] == nil {
+ return ErrIDNotFound
+ }
+ if !members[id].IsLearner {
+ return ErrMemberNotLearner
+ }
+ } else { // adding a new member
+ if members[id] != nil {
+ return ErrIDExists
+ }
+
+ urls := make(map[string]bool)
+ for _, m := range members {
+ for _, u := range m.PeerURLs {
+ urls[u] = true
+ }
+ }
+ for _, u := range confChangeContext.Member.PeerURLs {
+ if urls[u] {
+ return ErrPeerURLexists
+ }
+ }
+
+ if confChangeContext.Member.IsLearner { // the new member is a learner
+ numLearners := 0
+ for _, m := range members {
+ if m.IsLearner {
+ numLearners++
+ }
+ }
+ if numLearners+1 > maxLearners {
+ return ErrTooManyLearners
+ }
+ }
+ }
+ case raftpb.ConfChangeRemoveNode:
+ if members[id] == nil {
+ return ErrIDNotFound
+ }
+
+ case raftpb.ConfChangeUpdateNode:
+ if members[id] == nil {
+ return ErrIDNotFound
+ }
+ urls := make(map[string]bool)
+ for _, m := range members {
+ if m.ID == id {
+ continue
+ }
+ for _, u := range m.PeerURLs {
+ urls[u] = true
+ }
+ }
+ m := new(Member)
+ if err := json.Unmarshal(cc.Context, m); err != nil {
+ if c.lg != nil {
+ c.lg.Panic("failed to unmarshal member", zap.Error(err))
+ } else {
+ plog.Panicf("unmarshal member should never fail: %v", err)
+ }
+ }
+ for _, u := range m.PeerURLs {
+ if urls[u] {
+ return ErrPeerURLexists
+ }
+ }
+
+ default:
+ if c.lg != nil {
+ c.lg.Panic("unknown ConfChange type", zap.String("type", cc.Type.String()))
+ } else {
+ plog.Panicf("ConfChange type should be either AddNode, RemoveNode or UpdateNode")
+ }
+ }
+ return nil
+}
+
+// AddMember adds a new Member into the cluster, and saves the given member's
+// raftAttributes into the store. The given member should have empty attributes.
+// A Member with a matching id must not exist.
+func (c *RaftCluster) AddMember(m *Member) {
+ c.Lock()
+ defer c.Unlock()
+ if c.v2store != nil {
+ mustSaveMemberToStore(c.v2store, m)
+ }
+ if c.be != nil {
+ mustSaveMemberToBackend(c.be, m)
+ }
+
+ c.members[m.ID] = m
+
+ if c.lg != nil {
+ c.lg.Info(
+ "added member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("added-peer-id", m.ID.String()),
+ zap.Strings("added-peer-peer-urls", m.PeerURLs),
+ )
+ } else {
+ plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.cid)
+ }
+}
+
+// RemoveMember removes a member from the store.
+// The given id MUST exist, or the function panics.
+func (c *RaftCluster) RemoveMember(id types.ID) {
+ c.Lock()
+ defer c.Unlock()
+ if c.v2store != nil {
+ mustDeleteMemberFromStore(c.v2store, id)
+ }
+ if c.be != nil {
+ mustDeleteMemberFromBackend(c.be, id)
+ }
+
+ m, ok := c.members[id]
+ delete(c.members, id)
+ c.removed[id] = true
+
+ if c.lg != nil {
+ if ok {
+ c.lg.Info(
+ "removed member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("removed-remote-peer-id", id.String()),
+ zap.Strings("removed-remote-peer-urls", m.PeerURLs),
+ )
+ } else {
+ c.lg.Warn(
+ "skipped removing already removed member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("removed-remote-peer-id", id.String()),
+ )
+ }
+ } else {
+ plog.Infof("removed member %s from cluster %s", id, c.cid)
+ }
+}
+
+func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) {
+ c.Lock()
+ defer c.Unlock()
+
+ if m, ok := c.members[id]; ok {
+ m.Attributes = attr
+ if c.v2store != nil {
+ mustUpdateMemberAttrInStore(c.v2store, m)
+ }
+ if c.be != nil {
+ mustSaveMemberToBackend(c.be, m)
+ }
+ return
+ }
+
+ _, ok := c.removed[id]
+ if !ok {
+ if c.lg != nil {
+ c.lg.Panic(
+ "failed to update; member unknown",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("unknown-remote-peer-id", id.String()),
+ )
+ } else {
+ plog.Panicf("error updating attributes of unknown member %s", id)
+ }
+ }
+
+ if c.lg != nil {
+ c.lg.Warn(
+ "skipped attributes update of removed member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("updated-peer-id", id.String()),
+ )
+ } else {
+ plog.Warningf("skipped updating attributes of removed member %s", id)
+ }
+}
+
+// PromoteMember marks the member's IsLearner RaftAttributes to false.
+func (c *RaftCluster) PromoteMember(id types.ID) {
+ c.Lock()
+ defer c.Unlock()
+
+ c.members[id].RaftAttributes.IsLearner = false
+ if c.v2store != nil {
+ mustUpdateMemberInStore(c.v2store, c.members[id])
+ }
+ if c.be != nil {
+ mustSaveMemberToBackend(c.be, c.members[id])
+ }
+
+ if c.lg != nil {
+ c.lg.Info(
+			"promote member",
+			zap.String("promoted-member-id", id.String()),
+			zap.String("cluster-id", c.cid.String()),
+			zap.String("local-member-id", c.localID.String()),
+ )
+ } else {
+ plog.Noticef("promote member %s in cluster %s", id, c.cid)
+ }
+}
+
+func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) {
+ c.Lock()
+ defer c.Unlock()
+
+ c.members[id].RaftAttributes = raftAttr
+ if c.v2store != nil {
+ mustUpdateMemberInStore(c.v2store, c.members[id])
+ }
+ if c.be != nil {
+ mustSaveMemberToBackend(c.be, c.members[id])
+ }
+
+ if c.lg != nil {
+ c.lg.Info(
+ "updated member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("updated-remote-peer-id", id.String()),
+ zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs),
+ )
+ } else {
+ plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.cid)
+ }
+}
+
+func (c *RaftCluster) Version() *semver.Version {
+ c.Lock()
+ defer c.Unlock()
+ if c.version == nil {
+ return nil
+ }
+ return semver.Must(semver.NewVersion(c.version.String()))
+}
+
+func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *semver.Version)) {
+ c.Lock()
+ defer c.Unlock()
+ if c.version != nil {
+ if c.lg != nil {
+ c.lg.Info(
+ "updated cluster version",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("from", version.Cluster(c.version.String())),
+				zap.String("to", version.Cluster(ver.String())),
+ )
+ } else {
+ plog.Noticef("updated the cluster version from %v to %v", version.Cluster(c.version.String()), version.Cluster(ver.String()))
+ }
+ } else {
+ if c.lg != nil {
+ c.lg.Info(
+ "set initial cluster version",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("cluster-version", version.Cluster(ver.String())),
+ )
+ } else {
+ plog.Noticef("set the initial cluster version to %v", version.Cluster(ver.String()))
+ }
+ }
+ oldVer := c.version
+ c.version = ver
+ mustDetectDowngrade(c.lg, c.version)
+ if c.v2store != nil {
+ mustSaveClusterVersionToStore(c.v2store, ver)
+ }
+ if c.be != nil {
+ mustSaveClusterVersionToBackend(c.be, ver)
+ }
+ if oldVer != nil {
+ ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(oldVer.String())}).Set(0)
+ }
+ ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(ver.String())}).Set(1)
+ onSet(c.lg, ver)
+}
+
+func (c *RaftCluster) IsReadyToAddVotingMember() bool {
+ nmembers := 1
+ nstarted := 0
+
+ for _, member := range c.VotingMembers() {
+ if member.IsStarted() {
+ nstarted++
+ }
+ nmembers++
+ }
+
+ if nstarted == 1 && nmembers == 2 {
+ // a case of adding a new node to 1-member cluster for restoring cluster data
+ // https://github.com/etcd-io/etcd/blob/master/Documentation/v2/admin_guide.md#restoring-the-cluster
+ if c.lg != nil {
+			c.lg.Debug("number of started members is 1; can accept add member request")
+		} else {
+			plog.Debugf("The number of started members is 1. This cluster can accept add member request.")
+ }
+ return true
+ }
+
+ nquorum := nmembers/2 + 1
+ if nstarted < nquorum {
+ if c.lg != nil {
+ c.lg.Warn(
+ "rejecting member add; started member will be less than quorum",
+ zap.Int("number-of-started-member", nstarted),
+ zap.Int("quorum", nquorum),
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ )
+ } else {
+ plog.Warningf("Reject add member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
+ }
+ return false
+ }
+
+ return true
+}
+
+func (c *RaftCluster) IsReadyToRemoveVotingMember(id uint64) bool {
+ nmembers := 0
+ nstarted := 0
+
+ for _, member := range c.VotingMembers() {
+ if uint64(member.ID) == id {
+ continue
+ }
+
+ if member.IsStarted() {
+ nstarted++
+ }
+ nmembers++
+ }
+
+ nquorum := nmembers/2 + 1
+ if nstarted < nquorum {
+ if c.lg != nil {
+ c.lg.Warn(
+ "rejecting member remove; started member will be less than quorum",
+ zap.Int("number-of-started-member", nstarted),
+ zap.Int("quorum", nquorum),
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ )
+ } else {
+ plog.Warningf("Reject remove member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
+ }
+ return false
+ }
+
+ return true
+}
+
+func (c *RaftCluster) IsReadyToPromoteMember(id uint64) bool {
+ nmembers := 1 // We count the learner to be promoted for the future quorum
+ nstarted := 1 // and we also count it as started.
+
+ for _, member := range c.VotingMembers() {
+ if member.IsStarted() {
+ nstarted++
+ }
+ nmembers++
+ }
+
+ nquorum := nmembers/2 + 1
+ if nstarted < nquorum {
+ if c.lg != nil {
+ c.lg.Warn(
+ "rejecting member promote; started member will be less than quorum",
+ zap.Int("number-of-started-member", nstarted),
+ zap.Int("quorum", nquorum),
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ )
+ } else {
+ plog.Warningf("Reject promote member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
+ }
+ return false
+ }
+
+ return true
+}
+
+func membersFromStore(lg *zap.Logger, st v2store.Store) (map[types.ID]*Member, map[types.ID]bool) {
+ members := make(map[types.ID]*Member)
+ removed := make(map[types.ID]bool)
+ e, err := st.Get(StoreMembersPrefix, true, true)
+ if err != nil {
+ if isKeyNotFound(err) {
+ return members, removed
+ }
+ if lg != nil {
+ lg.Panic("failed to get members from store", zap.String("path", StoreMembersPrefix), zap.Error(err))
+ } else {
+ plog.Panicf("get storeMembers should never fail: %v", err)
+ }
+ }
+ for _, n := range e.Node.Nodes {
+ var m *Member
+ m, err = nodeToMember(n)
+ if err != nil {
+ if lg != nil {
+ lg.Panic("failed to nodeToMember", zap.Error(err))
+ } else {
+ plog.Panicf("nodeToMember should never fail: %v", err)
+ }
+ }
+ members[m.ID] = m
+ }
+
+ e, err = st.Get(storeRemovedMembersPrefix, true, true)
+ if err != nil {
+ if isKeyNotFound(err) {
+ return members, removed
+ }
+ if lg != nil {
+ lg.Panic(
+ "failed to get removed members from store",
+ zap.String("path", storeRemovedMembersPrefix),
+ zap.Error(err),
+ )
+ } else {
+ plog.Panicf("get storeRemovedMembers should never fail: %v", err)
+ }
+ }
+ for _, n := range e.Node.Nodes {
+ removed[MustParseMemberIDFromKey(n.Key)] = true
+ }
+ return members, removed
+}
+
+func clusterVersionFromStore(lg *zap.Logger, st v2store.Store) *semver.Version {
+ e, err := st.Get(path.Join(storePrefix, "version"), false, false)
+ if err != nil {
+ if isKeyNotFound(err) {
+ return nil
+ }
+ if lg != nil {
+ lg.Panic(
+ "failed to get cluster version from store",
+ zap.String("path", path.Join(storePrefix, "version")),
+ zap.Error(err),
+ )
+ } else {
+ plog.Panicf("unexpected error (%v) when getting cluster version from store", err)
+ }
+ }
+ return semver.Must(semver.NewVersion(*e.Node.Value))
+}
+
+// ValidateClusterAndAssignIDs validates the local cluster by matching the PeerURLs
+// with the existing cluster. If the validation succeeds, it assigns the IDs
+// from the existing cluster to the local cluster.
+// If the validation fails, an error will be returned.
+func ValidateClusterAndAssignIDs(lg *zap.Logger, local *RaftCluster, existing *RaftCluster) error {
+ ems := existing.Members()
+ lms := local.Members()
+ if len(ems) != len(lms) {
+ return fmt.Errorf("member count is unequal")
+ }
+ sort.Sort(MembersByPeerURLs(ems))
+ sort.Sort(MembersByPeerURLs(lms))
+
+ ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+ defer cancel()
+ for i := range ems {
+ if ok, err := netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[i].PeerURLs); !ok {
+ return fmt.Errorf("unmatched member while checking PeerURLs (%v)", err)
+ }
+ lms[i].ID = ems[i].ID
+ }
+ local.members = make(map[types.ID]*Member)
+ for _, m := range lms {
+ local.members[m.ID] = m
+ }
+ return nil
+}
+
+func mustDetectDowngrade(lg *zap.Logger, cv *semver.Version) {
+ lv := semver.Must(semver.NewVersion(version.Version))
+ // only keep major.minor version for comparison against cluster version
+ lv = &semver.Version{Major: lv.Major, Minor: lv.Minor}
+ if cv != nil && lv.LessThan(*cv) {
+ if lg != nil {
+ lg.Fatal(
+ "invalid downgrade; server version is lower than determined cluster version",
+ zap.String("current-server-version", version.Version),
+ zap.String("determined-cluster-version", version.Cluster(cv.String())),
+ )
+ } else {
+ plog.Fatalf("cluster cannot be downgraded (current version: %s is lower than determined cluster version: %s).", version.Version, version.Cluster(cv.String()))
+ }
+ }
+}
+
+// IsLocalMemberLearner reports whether the local member is a raft learner.
+func (c *RaftCluster) IsLocalMemberLearner() bool {
+ c.Lock()
+ defer c.Unlock()
+ localMember, ok := c.members[c.localID]
+ if !ok {
+ if c.lg != nil {
+ c.lg.Panic(
+ "failed to find local ID in cluster members",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ )
+ } else {
+ plog.Panicf("failed to find local ID %s in cluster %s", c.localID.String(), c.cid.String())
+ }
+ }
+ return localMember.IsLearner
+}
+
+// IsMemberExist reports whether the member with the given ID exists in the cluster.
+func (c *RaftCluster) IsMemberExist(id types.ID) bool {
+ c.Lock()
+ defer c.Unlock()
+ _, ok := c.members[id]
+ return ok
+}
+
+// VotingMemberIDs returns the IDs of the voting members in the cluster.
+func (c *RaftCluster) VotingMemberIDs() []types.ID {
+ c.Lock()
+ defer c.Unlock()
+ var ids []types.ID
+ for _, m := range c.members {
+ if !m.IsLearner {
+ ids = append(ids, m.ID)
+ }
+ }
+ sort.Sort(types.IDSlice(ids))
+ return ids
+}
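
All three IsReadyTo* checks above share the same arithmetic: fold the proposed change into the future membership, compute the future quorum as nmembers/2 + 1, and reject if fewer started members than that would remain. A worked sketch of the add-member case:

```go
package main

import "fmt"

// readyToAdd reproduces the arithmetic of IsReadyToAddVotingMember: the
// candidate is counted into the future membership, then the number of
// started members is compared against the future quorum.
func readyToAdd(started, voting int) bool {
	nmembers := voting + 1 // the member being added counts toward quorum
	if started == 1 && nmembers == 2 {
		return true // restoring cluster data into a fresh 1-member cluster
	}
	nquorum := nmembers/2 + 1
	return started >= nquorum
}

func main() {
	// 3-node cluster, all started: 3 started >= quorum of 3 in a future 4-member cluster.
	fmt.Println(readyToAdd(3, 3)) // true
	// 3-node cluster with one member down: only 2 started, but the future quorum is 3.
	fmt.Println(readyToAdd(2, 3)) // false
}
```
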
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/membership/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/membership/doc.go
new file mode 100644
index 000000000000..b07fb2d92859
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/membership/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package membership describes individual etcd members and clusters of members.
+package membership
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/membership/errors.go b/vendor/go.etcd.io/etcd/etcdserver/api/membership/errors.go
new file mode 100644
index 000000000000..8f6fe504e4bc
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/membership/errors.go
@@ -0,0 +1,35 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "errors"
+
+ "go.etcd.io/etcd/etcdserver/api/v2error"
+)
+
+var (
+ ErrIDRemoved = errors.New("membership: ID removed")
+ ErrIDExists = errors.New("membership: ID exists")
+ ErrIDNotFound = errors.New("membership: ID not found")
+ ErrPeerURLexists = errors.New("membership: peerURL exists")
+ ErrMemberNotLearner = errors.New("membership: can only promote a learner member")
+ ErrTooManyLearners = errors.New("membership: too many learner members in cluster")
+)
+
+func isKeyNotFound(err error) bool {
+ e, ok := err.(*v2error.Error)
+ return ok && e.ErrorCode == v2error.EcodeKeyNotFound
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/membership/member.go b/vendor/go.etcd.io/etcd/etcdserver/api/membership/member.go
new file mode 100644
index 000000000000..896cb36aa452
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/membership/member.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "crypto/sha1"
+ "encoding/binary"
+ "fmt"
+ "math/rand"
+ "sort"
+ "time"
+
+ "github.com/coreos/pkg/capnslog"
+ "go.etcd.io/etcd/pkg/types"
+)
+
+var (
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd/v3", "etcdserver/membership")
+)
+
+// RaftAttributes represents the raft related attributes of an etcd member.
+type RaftAttributes struct {
+ // PeerURLs is the list of peers in the raft cluster.
+ // TODO(philips): ensure these are URLs
+ PeerURLs []string `json:"peerURLs"`
+ // IsLearner indicates if the member is raft learner.
+ IsLearner bool `json:"isLearner,omitempty"`
+}
+
+// Attributes represents all the non-raft related attributes of an etcd member.
+type Attributes struct {
+ Name string `json:"name,omitempty"`
+ ClientURLs []string `json:"clientURLs,omitempty"`
+}
+
+type Member struct {
+ ID types.ID `json:"id"`
+ RaftAttributes
+ Attributes
+}
+
+// NewMember creates a Member without an ID and generates one based on the
+// cluster name, peer URLs, and time. This is used for bootstrapping or adding a new member.
+func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member {
+ return newMember(name, peerURLs, clusterName, now, false)
+}
+
+// NewMemberAsLearner creates a learner Member without an ID and generates one based on the
+// cluster name, peer URLs, and time. This is used for adding a new learner member.
+func NewMemberAsLearner(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member {
+ return newMember(name, peerURLs, clusterName, now, true)
+}
+
+func newMember(name string, peerURLs types.URLs, clusterName string, now *time.Time, isLearner bool) *Member {
+ m := &Member{
+ RaftAttributes: RaftAttributes{
+ PeerURLs: peerURLs.StringSlice(),
+ IsLearner: isLearner,
+ },
+ Attributes: Attributes{Name: name},
+ }
+
+ var b []byte
+ sort.Strings(m.PeerURLs)
+ for _, p := range m.PeerURLs {
+ b = append(b, []byte(p)...)
+ }
+
+ b = append(b, []byte(clusterName)...)
+ if now != nil {
+ b = append(b, []byte(fmt.Sprintf("%d", now.Unix()))...)
+ }
+
+ hash := sha1.Sum(b)
+ m.ID = types.ID(binary.BigEndian.Uint64(hash[:8]))
+ return m
+}
+
+// PickPeerURL chooses a random address from a given Member's PeerURLs.
+// It will panic if there are no PeerURLs available in the Member.
+func (m *Member) PickPeerURL() string {
+ if len(m.PeerURLs) == 0 {
+ panic("member should always have some peer url")
+ }
+ return m.PeerURLs[rand.Intn(len(m.PeerURLs))]
+}
+
+func (m *Member) Clone() *Member {
+ if m == nil {
+ return nil
+ }
+ mm := &Member{
+ ID: m.ID,
+ RaftAttributes: RaftAttributes{
+ IsLearner: m.IsLearner,
+ },
+ Attributes: Attributes{
+ Name: m.Name,
+ },
+ }
+ if m.PeerURLs != nil {
+ mm.PeerURLs = make([]string, len(m.PeerURLs))
+ copy(mm.PeerURLs, m.PeerURLs)
+ }
+ if m.ClientURLs != nil {
+ mm.ClientURLs = make([]string, len(m.ClientURLs))
+ copy(mm.ClientURLs, m.ClientURLs)
+ }
+ return mm
+}
+
+func (m *Member) IsStarted() bool {
+ return len(m.Name) != 0
+}
+
+// MembersByID implements sort by ID interface
+type MembersByID []*Member
+
+func (ms MembersByID) Len() int { return len(ms) }
+func (ms MembersByID) Less(i, j int) bool { return ms[i].ID < ms[j].ID }
+func (ms MembersByID) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
+
+// MembersByPeerURLs implements sort by peer urls interface
+type MembersByPeerURLs []*Member
+
+func (ms MembersByPeerURLs) Len() int { return len(ms) }
+func (ms MembersByPeerURLs) Less(i, j int) bool {
+ return ms[i].PeerURLs[0] < ms[j].PeerURLs[0]
+}
+func (ms MembersByPeerURLs) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
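
newMember derives the member ID deterministically: the sorted peer URLs, then the cluster token, then optionally a timestamp, hashed with SHA-1, with the first 8 bytes read as a big-endian uint64. A standalone sketch of that derivation with illustrative inputs; omitting the timestamp is what makes bootstrap IDs reproducible across restarts.

```go
package main

import (
	"crypto/sha1"
	"encoding/binary"
	"fmt"
	"sort"
)

// memberID mirrors newMember's hashing when no timestamp is supplied.
func memberID(peerURLs []string, clusterName string) uint64 {
	sort.Strings(peerURLs)
	var b []byte
	for _, u := range peerURLs {
		b = append(b, u...)
	}
	b = append(b, clusterName...)
	sum := sha1.Sum(b)
	return binary.BigEndian.Uint64(sum[:8])
}

func main() {
	// Same inputs always yield the same ID, which is how bootstrapping from
	// an initial-cluster URL map can detect duplicate members up front.
	id := memberID([]string{"http://10.0.0.1:2380"}, "etcd-cluster-1")
	fmt.Printf("%x\n", id)
}
```
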
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/membership/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/membership/metrics.go
new file mode 100644
index 000000000000..b3212bc80cdb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/membership/metrics.go
@@ -0,0 +1,31 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ ClusterVersionMetrics = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "cluster",
+ Name: "version",
+ Help: "Which version is running. 1 for 'cluster_version' label with current cluster version",
+ },
+ []string{"cluster_version"})
+)
+
+func init() {
+ prometheus.MustRegister(ClusterVersionMetrics)
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/membership/store.go b/vendor/go.etcd.io/etcd/etcdserver/api/membership/store.go
new file mode 100644
index 000000000000..14ab1190ed98
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/membership/store.go
@@ -0,0 +1,193 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "encoding/json"
+ "fmt"
+ "path"
+
+ "go.etcd.io/etcd/etcdserver/api/v2store"
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.etcd.io/etcd/pkg/types"
+
+ "github.com/coreos/go-semver/semver"
+)
+
+const (
+ attributesSuffix = "attributes"
+ raftAttributesSuffix = "raftAttributes"
+
+	// the prefix for storing membership-related information in the store provided by the store pkg.
+ storePrefix = "/0"
+)
+
+var (
+ membersBucketName = []byte("members")
+ membersRemovedBucketName = []byte("members_removed")
+ clusterBucketName = []byte("cluster")
+
+ StoreMembersPrefix = path.Join(storePrefix, "members")
+ storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members")
+)
+
+func mustSaveMemberToBackend(be backend.Backend, m *Member) {
+ mkey := backendMemberKey(m.ID)
+ mvalue, err := json.Marshal(m)
+ if err != nil {
+ plog.Panicf("marshal raftAttributes should never fail: %v", err)
+ }
+
+ tx := be.BatchTx()
+ tx.Lock()
+ tx.UnsafePut(membersBucketName, mkey, mvalue)
+ tx.Unlock()
+}
+
+func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) {
+ mkey := backendMemberKey(id)
+
+ tx := be.BatchTx()
+ tx.Lock()
+ tx.UnsafeDelete(membersBucketName, mkey)
+ tx.UnsafePut(membersRemovedBucketName, mkey, []byte("removed"))
+ tx.Unlock()
+}
+
+func mustSaveClusterVersionToBackend(be backend.Backend, ver *semver.Version) {
+ ckey := backendClusterVersionKey()
+
+ tx := be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ tx.UnsafePut(clusterBucketName, ckey, []byte(ver.String()))
+}
+
+func mustSaveMemberToStore(s v2store.Store, m *Member) {
+ b, err := json.Marshal(m.RaftAttributes)
+ if err != nil {
+ plog.Panicf("marshal raftAttributes should never fail: %v", err)
+ }
+ p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
+ if _, err := s.Create(p, false, string(b), false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ plog.Panicf("create raftAttributes should never fail: %v", err)
+ }
+}
+
+func mustDeleteMemberFromStore(s v2store.Store, id types.ID) {
+ if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil {
+ plog.Panicf("delete member should never fail: %v", err)
+ }
+ if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ plog.Panicf("create removedMember should never fail: %v", err)
+ }
+}
+
+func mustUpdateMemberInStore(s v2store.Store, m *Member) {
+ b, err := json.Marshal(m.RaftAttributes)
+ if err != nil {
+ plog.Panicf("marshal raftAttributes should never fail: %v", err)
+ }
+ p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
+ if _, err := s.Update(p, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ plog.Panicf("update raftAttributes should never fail: %v", err)
+ }
+}
+
+func mustUpdateMemberAttrInStore(s v2store.Store, m *Member) {
+ b, err := json.Marshal(m.Attributes)
+ if err != nil {
+ plog.Panicf("marshal raftAttributes should never fail: %v", err)
+ }
+ p := path.Join(MemberStoreKey(m.ID), attributesSuffix)
+ if _, err := s.Set(p, false, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ plog.Panicf("update raftAttributes should never fail: %v", err)
+ }
+}
+
+func mustSaveClusterVersionToStore(s v2store.Store, ver *semver.Version) {
+ if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ plog.Panicf("save cluster version should never fail: %v", err)
+ }
+}
+
+// nodeToMember builds a member from a key-value node.
+// the child nodes of the given node MUST be sorted by key.
+func nodeToMember(n *v2store.NodeExtern) (*Member, error) {
+ m := &Member{ID: MustParseMemberIDFromKey(n.Key)}
+ attrs := make(map[string][]byte)
+ raftAttrKey := path.Join(n.Key, raftAttributesSuffix)
+ attrKey := path.Join(n.Key, attributesSuffix)
+ for _, nn := range n.Nodes {
+ if nn.Key != raftAttrKey && nn.Key != attrKey {
+ return nil, fmt.Errorf("unknown key %q", nn.Key)
+ }
+ attrs[nn.Key] = []byte(*nn.Value)
+ }
+ if data := attrs[raftAttrKey]; data != nil {
+ if err := json.Unmarshal(data, &m.RaftAttributes); err != nil {
+ return nil, fmt.Errorf("unmarshal raftAttributes error: %v", err)
+ }
+ } else {
+ return nil, fmt.Errorf("raftAttributes key doesn't exist")
+ }
+ if data := attrs[attrKey]; data != nil {
+ if err := json.Unmarshal(data, &m.Attributes); err != nil {
+ return m, fmt.Errorf("unmarshal attributes error: %v", err)
+ }
+ }
+ return m, nil
+}
+
+func backendMemberKey(id types.ID) []byte {
+ return []byte(id.String())
+}
+
+func backendClusterVersionKey() []byte {
+ return []byte("clusterVersion")
+}
+
+func mustCreateBackendBuckets(be backend.Backend) {
+ tx := be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ tx.UnsafeCreateBucket(membersBucketName)
+ tx.UnsafeCreateBucket(membersRemovedBucketName)
+ tx.UnsafeCreateBucket(clusterBucketName)
+}
+
+func MemberStoreKey(id types.ID) string {
+ return path.Join(StoreMembersPrefix, id.String())
+}
+
+func StoreClusterVersionKey() string {
+ return path.Join(storePrefix, "version")
+}
+
+func MemberAttributesStorePath(id types.ID) string {
+ return path.Join(MemberStoreKey(id), attributesSuffix)
+}
+
+func MustParseMemberIDFromKey(key string) types.ID {
+ id, err := types.IDFromString(path.Base(key))
+ if err != nil {
+ plog.Panicf("unexpected parse member id error: %v", err)
+ }
+ return id
+}
+
+func RemovedMemberStoreKey(id types.ID) string {
+ return path.Join(storeRemovedMembersPrefix, id.String())
+}
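
The helpers above pin down a fixed v2store key layout under storePrefix. A minimal standalone sketch (not part of the vendored file; the member ID is hypothetical) of the paths that MemberStoreKey, MemberAttributesStorePath, StoreClusterVersionKey, and RemovedMemberStoreKey produce:

package main

import (
	"fmt"
	"path"
)

func main() {
	const storePrefix = "/0"
	id := "8e9e05c52164694d" // hypothetical member ID in hex

	memberKey := path.Join(storePrefix, "members", id)
	fmt.Println(memberKey)                                     // /0/members/8e9e05c52164694d
	fmt.Println(path.Join(memberKey, "raftAttributes"))        // raft attributes node
	fmt.Println(path.Join(memberKey, "attributes"))            // non-raft attributes node
	fmt.Println(path.Join(storePrefix, "version"))             // /0/version
	fmt.Println(path.Join(storePrefix, "removed_members", id)) // tombstone for removed member
}
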
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/coder.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/coder.go
new file mode 100644
index 000000000000..12c3e44242c4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/coder.go
@@ -0,0 +1,27 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import "go.etcd.io/etcd/raft/raftpb"
+
+type encoder interface {
+ // encode encodes the given message to an output stream.
+ encode(m *raftpb.Message) error
+}
+
+type decoder interface {
+ // decode decodes the message from an input stream.
+ decode() (raftpb.Message, error)
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/doc.go
new file mode 100644
index 000000000000..a9486a8bb664
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rafthttp implements HTTP transportation layer for etcd/raft pkg.
+package rafthttp
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/http.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/http.go
new file mode 100644
index 000000000000..0b05282c04fd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/http.go
@@ -0,0 +1,581 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "path"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/snap"
+ pioutil "go.etcd.io/etcd/pkg/ioutil"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/raft/raftpb"
+ "go.etcd.io/etcd/version"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+)
+
+const (
+ // connReadLimitByte limits the number of bytes
+ // a single read can read out.
+ //
+ // 64KB should be large enough to avoid causing a
+ // throughput bottleneck, while being small enough
+ // to avoid causing a read timeout.
+ connReadLimitByte = 64 * 1024
+)
+
+var (
+ RaftPrefix = "/raft"
+ ProbingPrefix = path.Join(RaftPrefix, "probing")
+ RaftStreamPrefix = path.Join(RaftPrefix, "stream")
+ RaftSnapshotPrefix = path.Join(RaftPrefix, "snapshot")
+
+ errIncompatibleVersion = errors.New("incompatible version")
+ errClusterIDMismatch = errors.New("cluster ID mismatch")
+)
+
+type peerGetter interface {
+ Get(id types.ID) Peer
+}
+
+type writerToResponse interface {
+ WriteTo(w http.ResponseWriter)
+}
+
+type pipelineHandler struct {
+ lg *zap.Logger
+ localID types.ID
+ tr Transporter
+ r Raft
+ cid types.ID
+}
+
+// newPipelineHandler returns a handler for handling raft messages
+// from pipeline for RaftPrefix.
+//
+// The handler reads out the raft message from request body,
+// and forwards it to the given raft state machine for processing.
+func newPipelineHandler(t *Transport, r Raft, cid types.ID) http.Handler {
+ return &pipelineHandler{
+ lg: t.Logger,
+ localID: t.ID,
+ tr: t,
+ r: r,
+ cid: cid,
+ }
+}
+
+func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "POST" {
+ w.Header().Set("Allow", "POST")
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
+
+ if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil {
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ return
+ }
+
+ addRemoteFromRequest(h.tr, r)
+
+ // Limit the data size that can be read from the request body, which ensures that a read from
+ // the connection will not time out accidentally due to possible blocking in the underlying implementation.
+ limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte)
+ b, err := ioutil.ReadAll(limitedr)
+ if err != nil {
+ if h.lg != nil {
+ h.lg.Warn(
+ "failed to read Raft message",
+ zap.String("local-member-id", h.localID.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("failed to read raft message (%v)", err)
+ }
+ http.Error(w, "error reading raft message", http.StatusBadRequest)
+ recvFailures.WithLabelValues(r.RemoteAddr).Inc()
+ return
+ }
+
+ var m raftpb.Message
+ if err := m.Unmarshal(b); err != nil {
+ if h.lg != nil {
+ h.lg.Warn(
+ "failed to unmarshal Raft message",
+ zap.String("local-member-id", h.localID.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("failed to unmarshal raft message (%v)", err)
+ }
+ http.Error(w, "error unmarshalling raft message", http.StatusBadRequest)
+ recvFailures.WithLabelValues(r.RemoteAddr).Inc()
+ return
+ }
+
+ receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(len(b)))
+
+ if err := h.r.Process(context.TODO(), m); err != nil {
+ switch v := err.(type) {
+ case writerToResponse:
+ v.WriteTo(w)
+ default:
+ if h.lg != nil {
+ h.lg.Warn(
+ "failed to process Raft message",
+ zap.String("local-member-id", h.localID.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("failed to process raft message (%v)", err)
+ }
+ http.Error(w, "error processing raft message", http.StatusInternalServerError)
+ w.(http.Flusher).Flush()
+ // disconnect the http stream
+ panic(err)
+ }
+ return
+ }
+
+ // Write the StatusNoContent header after the message has been processed by
+ // raft, so that the client can report MsgSnap status.
+ w.WriteHeader(http.StatusNoContent)
+}
+
+type snapshotHandler struct {
+ lg *zap.Logger
+ tr Transporter
+ r Raft
+ snapshotter *snap.Snapshotter
+
+ localID types.ID
+ cid types.ID
+}
+
+func newSnapshotHandler(t *Transport, r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler {
+ return &snapshotHandler{
+ lg: t.Logger,
+ tr: t,
+ r: r,
+ snapshotter: snapshotter,
+ localID: t.ID,
+ cid: cid,
+ }
+}
+
+const unknownSnapshotSender = "UNKNOWN_SNAPSHOT_SENDER"
+
+// ServeHTTP serves HTTP request to receive and process snapshot message.
+//
+// If the request sender dies without closing the underlying TCP connection,
+// the handler will keep waiting for the request body until TCP keepalive
+// detects, after several minutes, that the connection is broken.
+// This is acceptable because
+// 1. snapshot messages sent through other TCP connections could still be
+// received and processed.
+// 2. this case should happen rarely, so no further optimization is done.
+func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ start := time.Now()
+
+ if r.Method != "POST" {
+ w.Header().Set("Allow", "POST")
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ snapshotReceiveFailures.WithLabelValues(unknownSnapshotSender).Inc()
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
+
+ if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil {
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ snapshotReceiveFailures.WithLabelValues(unknownSnapshotSender).Inc()
+ return
+ }
+
+ addRemoteFromRequest(h.tr, r)
+
+ dec := &messageDecoder{r: r.Body}
+ // let snapshots be very large since they can exceed 512MB for large installations
+ m, err := dec.decodeLimit(uint64(1 << 63))
+ from := types.ID(m.From).String()
+ if err != nil {
+ msg := fmt.Sprintf("failed to decode raft message (%v)", err)
+ if h.lg != nil {
+ h.lg.Warn(
+ "failed to decode Raft message",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Error(err),
+ )
+ } else {
+ plog.Error(msg)
+ }
+ http.Error(w, msg, http.StatusBadRequest)
+ recvFailures.WithLabelValues(r.RemoteAddr).Inc()
+ snapshotReceiveFailures.WithLabelValues(from).Inc()
+ return
+ }
+
+ msgSizeVal := m.Size()
+ msgSize := humanize.Bytes(uint64(msgSizeVal))
+ receivedBytes.WithLabelValues(from).Add(float64(msgSizeVal))
+
+ if m.Type != raftpb.MsgSnap {
+ if h.lg != nil {
+ h.lg.Warn(
+ "unexpected Raft message type",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.String("message-type", m.Type.String()),
+ )
+ } else {
+ plog.Errorf("unexpected raft message type %s on snapshot path", m.Type)
+ }
+ http.Error(w, "wrong raft message type", http.StatusBadRequest)
+ snapshotReceiveFailures.WithLabelValues(from).Inc()
+ return
+ }
+
+ snapshotReceiveInflights.WithLabelValues(from).Inc()
+ defer func() {
+ snapshotReceiveInflights.WithLabelValues(from).Dec()
+ }()
+
+ if h.lg != nil {
+ h.lg.Info(
+ "receiving database snapshot",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index),
+ zap.Int("incoming-snapshot-message-size-bytes", msgSizeVal),
+ zap.String("incoming-snapshot-message-size", msgSize),
+ )
+ } else {
+ plog.Infof("receiving database snapshot [index: %d, from: %s, raft message size: %s]", m.Snapshot.Metadata.Index, types.ID(m.From), msgSize)
+ }
+
+ // save incoming database snapshot.
+ n, err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index)
+ if err != nil {
+ msg := fmt.Sprintf("failed to save KV snapshot (%v)", err)
+ if h.lg != nil {
+ h.lg.Warn(
+ "failed to save incoming database snapshot",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index),
+ zap.Error(err),
+ )
+ } else {
+ plog.Error(msg)
+ }
+ http.Error(w, msg, http.StatusInternalServerError)
+ snapshotReceiveFailures.WithLabelValues(from).Inc()
+ return
+ }
+
+ dbSize := humanize.Bytes(uint64(n))
+ receivedBytes.WithLabelValues(from).Add(float64(n))
+
+ downloadTook := time.Since(start)
+ if h.lg != nil {
+ h.lg.Info(
+ "received and saved database snapshot",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index),
+ zap.Int64("incoming-snapshot-size-bytes", n),
+ zap.String("incoming-snapshot-size", dbSize),
+ zap.String("download-took", downloadTook.String()),
+ )
+ } else {
+ plog.Infof("successfully received and saved database snapshot [index: %d, from: %s, raft message size: %s, db size: %s, took: %s]", m.Snapshot.Metadata.Index, types.ID(m.From), msgSize, dbSize, downloadTook.String())
+ }
+
+ if err := h.r.Process(context.TODO(), m); err != nil {
+ switch v := err.(type) {
+ // Process may return writerToResponse error when doing some
+ // additional checks before calling raft.Node.Step.
+ case writerToResponse:
+ v.WriteTo(w)
+ default:
+ msg := fmt.Sprintf("failed to process raft message (%v)", err)
+ if h.lg != nil {
+ h.lg.Warn(
+ "failed to process Raft message",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Error(err),
+ )
+ } else {
+ plog.Error(msg)
+ }
+ http.Error(w, msg, http.StatusInternalServerError)
+ snapshotReceiveFailures.WithLabelValues(from).Inc()
+ }
+ return
+ }
+
+ // Write the StatusNoContent header after the message has been processed by
+ // raft, so that the client can report MsgSnap status.
+ w.WriteHeader(http.StatusNoContent)
+
+ snapshotReceive.WithLabelValues(from).Inc()
+ snapshotReceiveSeconds.WithLabelValues(from).Observe(time.Since(start).Seconds())
+}
+
+type streamHandler struct {
+ lg *zap.Logger
+ tr *Transport
+ peerGetter peerGetter
+ r Raft
+ id types.ID
+ cid types.ID
+}
+
+func newStreamHandler(t *Transport, pg peerGetter, r Raft, id, cid types.ID) http.Handler {
+ return &streamHandler{
+ lg: t.Logger,
+ tr: t,
+ peerGetter: pg,
+ r: r,
+ id: id,
+ cid: cid,
+ }
+}
+
+func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "GET" {
+ w.Header().Set("Allow", "GET")
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ w.Header().Set("X-Server-Version", version.Version)
+ w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
+
+ if err := checkClusterCompatibilityFromHeader(h.lg, h.tr.ID, r.Header, h.cid); err != nil {
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ return
+ }
+
+ var t streamType
+ switch path.Dir(r.URL.Path) {
+ case streamTypeMsgAppV2.endpoint():
+ t = streamTypeMsgAppV2
+ case streamTypeMessage.endpoint():
+ t = streamTypeMessage
+ default:
+ if h.lg != nil {
+ h.lg.Debug(
+ "ignored unexpected streaming request path",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("path", r.URL.Path),
+ )
+ } else {
+ plog.Debugf("ignored unexpected streaming request path %s", r.URL.Path)
+ }
+ http.Error(w, "invalid path", http.StatusNotFound)
+ return
+ }
+
+ fromStr := path.Base(r.URL.Path)
+ from, err := types.IDFromString(fromStr)
+ if err != nil {
+ if h.lg != nil {
+ h.lg.Warn(
+ "failed to parse path into ID",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("path", fromStr),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("failed to parse from %s into ID (%v)", fromStr, err)
+ }
+ http.Error(w, "invalid from", http.StatusNotFound)
+ return
+ }
+ if h.r.IsIDRemoved(uint64(from)) {
+ if h.lg != nil {
+ h.lg.Warn(
+ "rejected stream from remote peer because it was removed",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("remote-peer-id-from", from.String()),
+ )
+ } else {
+ plog.Warningf("rejected the stream from peer %s since it was removed", from)
+ }
+ http.Error(w, "removed member", http.StatusGone)
+ return
+ }
+ p := h.peerGetter.Get(from)
+ if p == nil {
+ // This may happen in the following cases:
+ // 1. a user starts a remote peer that belongs to a different cluster
+ // with the same cluster ID.
+ // 2. the local etcd falls behind the cluster and cannot recognize
+ // the members that joined after its current progress.
+ if urls := r.Header.Get("X-PeerURLs"); urls != "" {
+ h.tr.AddRemote(from, strings.Split(urls, ","))
+ }
+ if h.lg != nil {
+ h.lg.Warn(
+ "failed to find remote peer in cluster",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("remote-peer-id-from", from.String()),
+ zap.String("cluster-id", h.cid.String()),
+ )
+ } else {
+ plog.Errorf("failed to find member %s in cluster %s", from, h.cid)
+ }
+ http.Error(w, "error sender not found", http.StatusNotFound)
+ return
+ }
+
+ wto := h.id.String()
+ if gto := r.Header.Get("X-Raft-To"); gto != wto {
+ if h.lg != nil {
+ h.lg.Warn(
+ "ignored streaming request; ID mismatch",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("remote-peer-id-header", gto),
+ zap.String("remote-peer-id-from", from.String()),
+ zap.String("cluster-id", h.cid.String()),
+ )
+ } else {
+ plog.Errorf("streaming request ignored (ID mismatch got %s want %s)", gto, wto)
+ }
+ http.Error(w, "to field mismatch", http.StatusPreconditionFailed)
+ return
+ }
+
+ w.WriteHeader(http.StatusOK)
+ w.(http.Flusher).Flush()
+
+ c := newCloseNotifier()
+ conn := &outgoingConn{
+ t: t,
+ Writer: w,
+ Flusher: w.(http.Flusher),
+ Closer: c,
+ localID: h.tr.ID,
+ peerID: h.id,
+ }
+ p.attachOutgoingConn(conn)
+ <-c.closeNotify()
+}
+
+// checkClusterCompatibilityFromHeader checks the cluster compatibility of
+// the local member from the given header.
+// It checks whether the version of the local member is compatible with
+// the versions in the header, and whether the cluster ID of the local
+// member matches the one in the header.
+func checkClusterCompatibilityFromHeader(lg *zap.Logger, localID types.ID, header http.Header, cid types.ID) error {
+ remoteName := header.Get("X-Server-From")
+
+ remoteServer := serverVersion(header)
+ remoteVs := ""
+ if remoteServer != nil {
+ remoteVs = remoteServer.String()
+ }
+
+ remoteMinClusterVer := minClusterVersion(header)
+ remoteMinClusterVs := ""
+ if remoteMinClusterVer != nil {
+ remoteMinClusterVs = remoteMinClusterVer.String()
+ }
+
+ localServer, localMinCluster, err := checkVersionCompatibility(remoteName, remoteServer, remoteMinClusterVer)
+
+ localVs := ""
+ if localServer != nil {
+ localVs = localServer.String()
+ }
+ localMinClusterVs := ""
+ if localMinCluster != nil {
+ localMinClusterVs = localMinCluster.String()
+ }
+
+ if err != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to check version compatibility",
+ zap.String("local-member-id", localID.String()),
+ zap.String("local-member-cluster-id", cid.String()),
+ zap.String("local-member-server-version", localVs),
+ zap.String("local-member-server-minimum-cluster-version", localMinClusterVs),
+ zap.String("remote-peer-server-name", remoteName),
+ zap.String("remote-peer-server-version", remoteVs),
+ zap.String("remote-peer-server-minimum-cluster-version", remoteMinClusterVs),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("request version incompatibility (%v)", err)
+ }
+ return errIncompatibleVersion
+ }
+ if gcid := header.Get("X-Etcd-Cluster-ID"); gcid != cid.String() {
+ if lg != nil {
+ lg.Warn(
+ "request cluster ID mismatch",
+ zap.String("local-member-id", localID.String()),
+ zap.String("local-member-cluster-id", cid.String()),
+ zap.String("local-member-server-version", localVs),
+ zap.String("local-member-server-minimum-cluster-version", localMinClusterVs),
+ zap.String("remote-peer-server-name", remoteName),
+ zap.String("remote-peer-server-version", remoteVs),
+ zap.String("remote-peer-server-minimum-cluster-version", remoteMinClusterVs),
+ zap.String("remote-peer-cluster-id", gcid),
+ )
+ } else {
+ plog.Errorf("request cluster ID mismatch (got %s want %s)", gcid, cid)
+ }
+ return errClusterIDMismatch
+ }
+ return nil
+}
+
+type closeNotifier struct {
+ done chan struct{}
+}
+
+func newCloseNotifier() *closeNotifier {
+ return &closeNotifier{
+ done: make(chan struct{}),
+ }
+}
+
+func (n *closeNotifier) Close() error {
+ close(n.done)
+ return nil
+}
+
+func (n *closeNotifier) closeNotify() <-chan struct{} { return n.done }
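
The closeNotifier above is what keeps streamHandler.ServeHTTP from returning: the handler attaches the connection to the peer and then blocks on closeNotify(), so the ResponseWriter stays usable by the peer's writer goroutine until that goroutine calls Close. A minimal sketch of the same ownership-handoff pattern, assuming a hypothetical /stream endpoint:

package main

import (
	"fmt"
	"net/http"
	"time"
)

type closeNotifier struct{ done chan struct{} }

func newCloseNotifier() *closeNotifier { return &closeNotifier{done: make(chan struct{})} }

func (n *closeNotifier) Close() error { close(n.done); return nil }

func (n *closeNotifier) closeNotify() <-chan struct{} { return n.done }

func stream(w http.ResponseWriter, r *http.Request) {
	c := newCloseNotifier()
	go func() {
		defer c.Close() // the writer goroutine owns w until it closes the notifier
		for i := 0; i < 3; i++ {
			fmt.Fprintf(w, "line %d\n", i)
			w.(http.Flusher).Flush()
			time.Sleep(time.Second)
		}
	}()
	<-c.closeNotify() // block so returning does not close the connection
}

func main() {
	http.HandleFunc("/stream", stream)
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
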
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/metrics.go
new file mode 100644
index 000000000000..02fff84be7c4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/metrics.go
@@ -0,0 +1,186 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ activePeers = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "active_peers",
+ Help: "The current number of active peer connections.",
+ },
+ []string{"Local", "Remote"},
+ )
+
+ disconnectedPeers = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "disconnected_peers_total",
+ Help: "The total number of disconnected peers.",
+ },
+ []string{"Local", "Remote"},
+ )
+
+ sentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "peer_sent_bytes_total",
+ Help: "The total number of bytes sent to peers.",
+ },
+ []string{"To"},
+ )
+
+ receivedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "peer_received_bytes_total",
+ Help: "The total number of bytes received from peers.",
+ },
+ []string{"From"},
+ )
+
+ sentFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "peer_sent_failures_total",
+ Help: "The total number of send failures from peers.",
+ },
+ []string{"To"},
+ )
+
+ recvFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "peer_received_failures_total",
+ Help: "The total number of receive failures from peers.",
+ },
+ []string{"From"},
+ )
+
+ snapshotSend = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_send_success",
+ Help: "Total number of successful snapshot sends",
+ },
+ []string{"To"},
+ )
+
+ snapshotSendInflights = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_send_inflights_total",
+ Help: "Total number of inflight snapshot sends",
+ },
+ []string{"To"},
+ )
+
+ snapshotSendFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_send_failures",
+ Help: "Total number of snapshot send failures",
+ },
+ []string{"To"},
+ )
+
+ snapshotSendSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_send_total_duration_seconds",
+ Help: "Total latency distributions of v3 snapshot sends",
+
+ // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+ // highest bucket start of 0.1 sec * 2^9 == 51.2 sec
+ Buckets: prometheus.ExponentialBuckets(0.1, 2, 10),
+ },
+ []string{"To"},
+ )
+
+ snapshotReceive = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_receive_success",
+ Help: "Total number of successful snapshot receives",
+ },
+ []string{"From"},
+ )
+
+ snapshotReceiveInflights = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_receive_inflights_total",
+ Help: "Total number of inflight snapshot receives",
+ },
+ []string{"From"},
+ )
+
+ snapshotReceiveFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_receive_failures",
+ Help: "Total number of snapshot receive failures",
+ },
+ []string{"From"},
+ )
+
+ snapshotReceiveSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_receive_total_duration_seconds",
+ Help: "Total latency distributions of v3 snapshot receives",
+
+ // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+ // highest bucket start of 0.1 sec * 2^9 == 51.2 sec
+ Buckets: prometheus.ExponentialBuckets(0.1, 2, 10),
+ },
+ []string{"From"},
+ )
+
+ rttSec = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "peer_round_trip_time_seconds",
+ Help: "Round-Trip-Time histogram between peers",
+
+ // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2
+ // highest bucket start of 0.0001 sec * 2^15 == 3.2768 sec
+ Buckets: prometheus.ExponentialBuckets(0.0001, 2, 16),
+ },
+ []string{"To"},
+ )
+)
+
+func init() {
+ prometheus.MustRegister(activePeers)
+ prometheus.MustRegister(disconnectedPeers)
+ prometheus.MustRegister(sentBytes)
+ prometheus.MustRegister(receivedBytes)
+ prometheus.MustRegister(sentFailures)
+ prometheus.MustRegister(recvFailures)
+
+ prometheus.MustRegister(snapshotSend)
+ prometheus.MustRegister(snapshotSendInflights)
+ prometheus.MustRegister(snapshotSendFailures)
+ prometheus.MustRegister(snapshotSendSeconds)
+ prometheus.MustRegister(snapshotReceive)
+ prometheus.MustRegister(snapshotReceiveInflights)
+ prometheus.MustRegister(snapshotReceiveFailures)
+ prometheus.MustRegister(snapshotReceiveSeconds)
+
+ prometheus.MustRegister(rttSec)
+}
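
The bucket comments above can be checked directly, since prometheus.ExponentialBuckets returns exactly the upper bounds they describe. A tiny standalone sketch, assuming the same client_golang package vendored here:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Upper bounds 0.1 sec .. 0.1 * 2^9 == 51.2 sec, as the histogram comments state.
	fmt.Println(prometheus.ExponentialBuckets(0.1, 2, 10))
	// [0.1 0.2 0.4 0.8 1.6 3.2 6.4 12.8 25.6 51.2]
}
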
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/msg_codec.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/msg_codec.go
new file mode 100644
index 000000000000..2417d222e58b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/msg_codec.go
@@ -0,0 +1,68 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "go.etcd.io/etcd/pkg/pbutil"
+ "go.etcd.io/etcd/raft/raftpb"
+)
+
+// messageEncoder is an encoder that can encode all kinds of messages.
+// It MUST be used with a paired messageDecoder.
+type messageEncoder struct {
+ w io.Writer
+}
+
+func (enc *messageEncoder) encode(m *raftpb.Message) error {
+ if err := binary.Write(enc.w, binary.BigEndian, uint64(m.Size())); err != nil {
+ return err
+ }
+ _, err := enc.w.Write(pbutil.MustMarshal(m))
+ return err
+}
+
+// messageDecoder is a decoder that can decode all kinds of messages.
+type messageDecoder struct {
+ r io.Reader
+}
+
+var (
+ readBytesLimit uint64 = 512 * 1024 * 1024 // 512 MB
+ ErrExceedSizeLimit = errors.New("rafthttp: error limit exceeded")
+)
+
+func (dec *messageDecoder) decode() (raftpb.Message, error) {
+ return dec.decodeLimit(readBytesLimit)
+}
+
+func (dec *messageDecoder) decodeLimit(numBytes uint64) (raftpb.Message, error) {
+ var m raftpb.Message
+ var l uint64
+ if err := binary.Read(dec.r, binary.BigEndian, &l); err != nil {
+ return m, err
+ }
+ if l > numBytes {
+ return m, ErrExceedSizeLimit
+ }
+ buf := make([]byte, int(l))
+ if _, err := io.ReadFull(dec.r, buf); err != nil {
+ return m, err
+ }
+ return m, m.Unmarshal(buf)
+}
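
The wire format used by messageEncoder/messageDecoder above is simply an 8-byte big-endian length prefix followed by the marshaled message, with decodeLimit rejecting frames larger than the given cap. A self-contained round-trip sketch of that framing, using a plain []byte payload in place of a marshaled raftpb.Message:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

func encodeFrame(w io.Writer, payload []byte) error {
	// 8-byte big-endian length prefix, then the payload itself.
	if err := binary.Write(w, binary.BigEndian, uint64(len(payload))); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

func decodeFrame(r io.Reader, limit uint64) ([]byte, error) {
	var l uint64
	if err := binary.Read(r, binary.BigEndian, &l); err != nil {
		return nil, err
	}
	if l > limit {
		return nil, fmt.Errorf("frame of %d bytes exceeds limit %d", l, limit)
	}
	buf := make([]byte, int(l))
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	var b bytes.Buffer
	if err := encodeFrame(&b, []byte("hello raft")); err != nil {
		panic(err)
	}
	out, err := decodeFrame(&b, 512*1024*1024) // same 512 MB cap as readBytesLimit
	fmt.Printf("%s %v\n", out, err)            // hello raft <nil>
}
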
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/msgappv2_codec.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/msgappv2_codec.go
new file mode 100644
index 000000000000..1fa36deb3942
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/msgappv2_codec.go
@@ -0,0 +1,248 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "time"
+
+ stats "go.etcd.io/etcd/etcdserver/api/v2stats"
+ "go.etcd.io/etcd/pkg/pbutil"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/raft/raftpb"
+)
+
+const (
+ msgTypeLinkHeartbeat uint8 = 0
+ msgTypeAppEntries uint8 = 1
+ msgTypeApp uint8 = 2
+
+ msgAppV2BufSize = 1024 * 1024
+)
+
+// The msgappv2 stream sends three types of messages: linkHeartbeatMessage,
+// AppEntries, and MsgApp. AppEntries is the MsgApp that is sent in
+// the replicate state in raft, whose index and term are fully predictable.
+//
+// Data format of linkHeartbeatMessage:
+// | offset | bytes | description |
+// +--------+-------+-------------+
+// | 0 | 1 | \x00 |
+//
+// Data format of AppEntries:
+// | offset | bytes | description |
+// +--------+-------+-------------+
+// | 0 | 1 | \x01 |
+// | 1 | 8 | length of entries |
+// | 9 | 8 | length of first entry |
+// | 17 | n1 | first entry |
+// ...
+// | x | 8 | length of k-th entry data |
+// | x+8 | nk | k-th entry data |
+// | x+8+nk | 8 | commit index |
+//
+// Data format of MsgApp:
+// | offset | bytes | description |
+// +--------+-------+-------------+
+// | 0 | 1 | \x02 |
+// | 1 | 8 | length of encoded message |
+// | 9 | n | encoded message |
+type msgAppV2Encoder struct {
+ w io.Writer
+ fs *stats.FollowerStats
+
+ term uint64
+ index uint64
+ buf []byte
+ uint64buf []byte
+ uint8buf []byte
+}
+
+func newMsgAppV2Encoder(w io.Writer, fs *stats.FollowerStats) *msgAppV2Encoder {
+ return &msgAppV2Encoder{
+ w: w,
+ fs: fs,
+ buf: make([]byte, msgAppV2BufSize),
+ uint64buf: make([]byte, 8),
+ uint8buf: make([]byte, 1),
+ }
+}
+
+func (enc *msgAppV2Encoder) encode(m *raftpb.Message) error {
+ start := time.Now()
+ switch {
+ case isLinkHeartbeatMessage(m):
+ enc.uint8buf[0] = msgTypeLinkHeartbeat
+ if _, err := enc.w.Write(enc.uint8buf); err != nil {
+ return err
+ }
+ case enc.index == m.Index && enc.term == m.LogTerm && m.LogTerm == m.Term:
+ enc.uint8buf[0] = msgTypeAppEntries
+ if _, err := enc.w.Write(enc.uint8buf); err != nil {
+ return err
+ }
+ // write length of entries
+ binary.BigEndian.PutUint64(enc.uint64buf, uint64(len(m.Entries)))
+ if _, err := enc.w.Write(enc.uint64buf); err != nil {
+ return err
+ }
+ for i := 0; i < len(m.Entries); i++ {
+ // write length of entry
+ binary.BigEndian.PutUint64(enc.uint64buf, uint64(m.Entries[i].Size()))
+ if _, err := enc.w.Write(enc.uint64buf); err != nil {
+ return err
+ }
+ if n := m.Entries[i].Size(); n < msgAppV2BufSize {
+ if _, err := m.Entries[i].MarshalTo(enc.buf); err != nil {
+ return err
+ }
+ if _, err := enc.w.Write(enc.buf[:n]); err != nil {
+ return err
+ }
+ } else {
+ if _, err := enc.w.Write(pbutil.MustMarshal(&m.Entries[i])); err != nil {
+ return err
+ }
+ }
+ enc.index++
+ }
+ // write commit index
+ binary.BigEndian.PutUint64(enc.uint64buf, m.Commit)
+ if _, err := enc.w.Write(enc.uint64buf); err != nil {
+ return err
+ }
+ enc.fs.Succ(time.Since(start))
+ default:
+ if err := binary.Write(enc.w, binary.BigEndian, msgTypeApp); err != nil {
+ return err
+ }
+ // write size of message
+ if err := binary.Write(enc.w, binary.BigEndian, uint64(m.Size())); err != nil {
+ return err
+ }
+ // write message
+ if _, err := enc.w.Write(pbutil.MustMarshal(m)); err != nil {
+ return err
+ }
+
+ enc.term = m.Term
+ enc.index = m.Index
+ if l := len(m.Entries); l > 0 {
+ enc.index = m.Entries[l-1].Index
+ }
+ enc.fs.Succ(time.Since(start))
+ }
+ return nil
+}
+
+type msgAppV2Decoder struct {
+ r io.Reader
+ local, remote types.ID
+
+ term uint64
+ index uint64
+ buf []byte
+ uint64buf []byte
+ uint8buf []byte
+}
+
+func newMsgAppV2Decoder(r io.Reader, local, remote types.ID) *msgAppV2Decoder {
+ return &msgAppV2Decoder{
+ r: r,
+ local: local,
+ remote: remote,
+ buf: make([]byte, msgAppV2BufSize),
+ uint64buf: make([]byte, 8),
+ uint8buf: make([]byte, 1),
+ }
+}
+
+func (dec *msgAppV2Decoder) decode() (raftpb.Message, error) {
+ var (
+ m raftpb.Message
+ typ uint8
+ )
+ if _, err := io.ReadFull(dec.r, dec.uint8buf); err != nil {
+ return m, err
+ }
+ typ = dec.uint8buf[0]
+ switch typ {
+ case msgTypeLinkHeartbeat:
+ return linkHeartbeatMessage, nil
+ case msgTypeAppEntries:
+ m = raftpb.Message{
+ Type: raftpb.MsgApp,
+ From: uint64(dec.remote),
+ To: uint64(dec.local),
+ Term: dec.term,
+ LogTerm: dec.term,
+ Index: dec.index,
+ }
+
+ // decode entries
+ if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil {
+ return m, err
+ }
+ l := binary.BigEndian.Uint64(dec.uint64buf)
+ m.Entries = make([]raftpb.Entry, int(l))
+ for i := 0; i < int(l); i++ {
+ if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil {
+ return m, err
+ }
+ size := binary.BigEndian.Uint64(dec.uint64buf)
+ var buf []byte
+ if size < msgAppV2BufSize {
+ buf = dec.buf[:size]
+ if _, err := io.ReadFull(dec.r, buf); err != nil {
+ return m, err
+ }
+ } else {
+ buf = make([]byte, int(size))
+ if _, err := io.ReadFull(dec.r, buf); err != nil {
+ return m, err
+ }
+ }
+ dec.index++
+ // 1 alloc
+ pbutil.MustUnmarshal(&m.Entries[i], buf)
+ }
+ // decode commit index
+ if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil {
+ return m, err
+ }
+ m.Commit = binary.BigEndian.Uint64(dec.uint64buf)
+ case msgTypeApp:
+ var size uint64
+ if err := binary.Read(dec.r, binary.BigEndian, &size); err != nil {
+ return m, err
+ }
+ buf := make([]byte, int(size))
+ if _, err := io.ReadFull(dec.r, buf); err != nil {
+ return m, err
+ }
+ pbutil.MustUnmarshal(&m, buf)
+
+ dec.term = m.Term
+ dec.index = m.Index
+ if l := len(m.Entries); l > 0 {
+ dec.index = m.Entries[l-1].Index
+ }
+ default:
+ return m, fmt.Errorf("failed to parse type %d in msgappv2 stream", typ)
+ }
+ return m, nil
+}
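
To make the msgappv2 framing above concrete: a linkHeartbeatMessage is a single \x00 byte on the wire, while a full MsgApp frame is a \x02 type byte, an 8-byte big-endian size, and the encoded message. A standalone sketch of those two frame shapes, with a plain byte slice standing in for a marshaled raftpb.Message:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	msgTypeLinkHeartbeat uint8 = 0
	msgTypeApp           uint8 = 2
)

func main() {
	var heartbeat bytes.Buffer
	heartbeat.WriteByte(msgTypeLinkHeartbeat) // the whole heartbeat frame: \x00

	var app bytes.Buffer
	payload := []byte("marshaled message") // stand-in for pbutil.MustMarshal(m)
	app.WriteByte(msgTypeApp)
	_ = binary.Write(&app, binary.BigEndian, uint64(len(payload)))
	app.Write(payload)

	fmt.Printf("heartbeat: % x\n", heartbeat.Bytes())
	fmt.Printf("msgApp:    % x\n", app.Bytes())
}
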
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/peer.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/peer.go
new file mode 100644
index 000000000000..8130c4a96b28
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/peer.go
@@ -0,0 +1,374 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/snap"
+ stats "go.etcd.io/etcd/etcdserver/api/v2stats"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/raft"
+ "go.etcd.io/etcd/raft/raftpb"
+
+ "go.uber.org/zap"
+ "golang.org/x/time/rate"
+)
+
+const (
+ // ConnReadTimeout and ConnWriteTimeout are the i/o timeouts set on each connection the rafthttp pkg creates.
+ // A 5 second timeout is good enough for recycling bad connections; otherwise we would have to wait for
+ // tcp keepalive to detect a bad connection, which takes minutes.
+ // For long-term streaming connections, the rafthttp pkg sends an application-level linkHeartbeatMessage
+ // to keep the connection alive.
+ // For short-term pipeline connections, the connection MUST be killed to avoid it being
+ // put back into the http pkg connection pool.
+ ConnReadTimeout = 5 * time.Second
+ ConnWriteTimeout = 5 * time.Second
+
+ recvBufSize = 4096
+ // maxPendingProposals holds the proposals during one leader election process.
+ // Generally one leader election takes at most 1 sec. It should have
+ // 0-2 election conflicts, and each one takes 0.5 sec.
+ // We assume the number of concurrent proposers is smaller than 4096.
+ // One client blocks on its proposal for at least 1 sec, so 4096 is enough
+ // to hold all proposals.
+ maxPendingProposals = 4096
+
+ streamAppV2 = "streamMsgAppV2"
+ streamMsg = "streamMsg"
+ pipelineMsg = "pipeline"
+ sendSnap = "sendMsgSnap"
+)
+
+type Peer interface {
+ // send sends the message to the remote peer. The function is non-blocking
+ // and makes no promise that the message will be received by the remote.
+ // When it fails to send the message out, it reports the status to the
+ // underlying raft.
+ send(m raftpb.Message)
+
+ // sendSnap sends the merged snapshot message to the remote peer. Its behavior
+ // is similar to send.
+ sendSnap(m snap.Message)
+
+ // update updates the URLs of the remote peer.
+ update(urls types.URLs)
+
+ // attachOutgoingConn attaches the outgoing connection to the peer for
+ // stream usage. After the call, ownership of the outgoing
+ // connection is handed over to the peer. The peer will close the connection
+ // when it is no longer used.
+ attachOutgoingConn(conn *outgoingConn)
+ // activeSince returns the time that the connection with the
+ // peer became active.
+ activeSince() time.Time
+ // stop performs any necessary finalization and terminates the peer
+ // gracefully.
+ stop()
+}
+
+// peer is the representative of a remote raft node. The local raft node sends
+// messages to the remote through the peer.
+// Each peer has two underlying mechanisms to send out a message: stream and
+// pipeline.
+// A stream is a receiver-initialized long-polling connection, which
+// is always open to transfer messages. Besides the general stream, a peer also has
+// an optimized stream for sending msgApp, since msgApp accounts for a large part
+// of all messages. Only the raft leader uses the optimized stream to send msgApp
+// to the remote follower node.
+// A pipeline is a series of http clients that send http requests to the remote.
+// It is only used when the stream has not been established.
+type peer struct {
+ lg *zap.Logger
+
+ localID types.ID
+ // id of the remote raft peer node
+ id types.ID
+
+ r Raft
+
+ status *peerStatus
+
+ picker *urlPicker
+
+ msgAppV2Writer *streamWriter
+ writer *streamWriter
+ pipeline *pipeline
+ snapSender *snapshotSender // snapshot sender to send v3 snapshot messages
+ msgAppV2Reader *streamReader
+ msgAppReader *streamReader
+
+ recvc chan raftpb.Message
+ propc chan raftpb.Message
+
+ mu sync.Mutex
+ paused bool
+
+ cancel context.CancelFunc // cancel pending works in go routine created by peer.
+ stopc chan struct{}
+}
+
+func startPeer(t *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer {
+ if t.Logger != nil {
+ t.Logger.Info("starting remote peer", zap.String("remote-peer-id", peerID.String()))
+ } else {
+ plog.Infof("starting peer %s...", peerID)
+ }
+ defer func() {
+ if t.Logger != nil {
+ t.Logger.Info("started remote peer", zap.String("remote-peer-id", peerID.String()))
+ } else {
+ plog.Infof("started peer %s", peerID)
+ }
+ }()
+
+ status := newPeerStatus(t.Logger, t.ID, peerID)
+ picker := newURLPicker(urls)
+ errorc := t.ErrorC
+ r := t.Raft
+ pipeline := &pipeline{
+ peerID: peerID,
+ tr: t,
+ picker: picker,
+ status: status,
+ followerStats: fs,
+ raft: r,
+ errorc: errorc,
+ }
+ pipeline.start()
+
+ p := &peer{
+ lg: t.Logger,
+ localID: t.ID,
+ id: peerID,
+ r: r,
+ status: status,
+ picker: picker,
+ msgAppV2Writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r),
+ writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r),
+ pipeline: pipeline,
+ snapSender: newSnapshotSender(t, picker, peerID, status),
+ recvc: make(chan raftpb.Message, recvBufSize),
+ propc: make(chan raftpb.Message, maxPendingProposals),
+ stopc: make(chan struct{}),
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ p.cancel = cancel
+ go func() {
+ for {
+ select {
+ case mm := <-p.recvc:
+ if err := r.Process(ctx, mm); err != nil {
+ if t.Logger != nil {
+ t.Logger.Warn("failed to process Raft message", zap.Error(err))
+ } else {
+ plog.Warningf("failed to process raft message (%v)", err)
+ }
+ }
+ case <-p.stopc:
+ return
+ }
+ }
+ }()
+
+ // r.Process might block when processing a proposal while there is no leader.
+ // Thus propc must be handled in a goroutine separate from recvc to avoid
+ // blocking the processing of other raft messages.
+ go func() {
+ for {
+ select {
+ case mm := <-p.propc:
+ if err := r.Process(ctx, mm); err != nil {
+ plog.Warningf("failed to process raft message (%v)", err)
+ }
+ case <-p.stopc:
+ return
+ }
+ }
+ }()
+
+ p.msgAppV2Reader = &streamReader{
+ lg: t.Logger,
+ peerID: peerID,
+ typ: streamTypeMsgAppV2,
+ tr: t,
+ picker: picker,
+ status: status,
+ recvc: p.recvc,
+ propc: p.propc,
+ rl: rate.NewLimiter(t.DialRetryFrequency, 1),
+ }
+ p.msgAppReader = &streamReader{
+ lg: t.Logger,
+ peerID: peerID,
+ typ: streamTypeMessage,
+ tr: t,
+ picker: picker,
+ status: status,
+ recvc: p.recvc,
+ propc: p.propc,
+ rl: rate.NewLimiter(t.DialRetryFrequency, 1),
+ }
+
+ p.msgAppV2Reader.start()
+ p.msgAppReader.start()
+
+ return p
+}
+
+func (p *peer) send(m raftpb.Message) {
+ p.mu.Lock()
+ paused := p.paused
+ p.mu.Unlock()
+
+ if paused {
+ return
+ }
+
+ writec, name := p.pick(m)
+ select {
+ case writec <- m:
+ default:
+ p.r.ReportUnreachable(m.To)
+ if isMsgSnap(m) {
+ p.r.ReportSnapshot(m.To, raft.SnapshotFailure)
+ }
+ if p.status.isActive() {
+ if p.lg != nil {
+ p.lg.Warn(
+ "dropped internal Raft message since sending buffer is full (overloaded network)",
+ zap.String("message-type", m.Type.String()),
+ zap.String("local-member-id", p.localID.String()),
+ zap.String("from", types.ID(m.From).String()),
+ zap.String("remote-peer-id", p.id.String()),
+ zap.Bool("remote-peer-active", p.status.isActive()),
+ )
+ } else {
+ plog.MergeWarningf("dropped internal raft message to %s since %s's sending buffer is full (bad/overloaded network)", p.id, name)
+ }
+ } else {
+ if p.lg != nil {
+ p.lg.Warn(
+ "dropped internal Raft message since sending buffer is full (overloaded network)",
+ zap.String("message-type", m.Type.String()),
+ zap.String("local-member-id", p.localID.String()),
+ zap.String("from", types.ID(m.From).String()),
+ zap.String("remote-peer-id", p.id.String()),
+ zap.Bool("remote-peer-active", p.status.isActive()),
+ )
+ } else {
+ plog.Debugf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name)
+ }
+ }
+ sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
+ }
+}
+
+func (p *peer) sendSnap(m snap.Message) {
+ go p.snapSender.send(m)
+}
+
+func (p *peer) update(urls types.URLs) {
+ p.picker.update(urls)
+}
+
+func (p *peer) attachOutgoingConn(conn *outgoingConn) {
+ var ok bool
+ switch conn.t {
+ case streamTypeMsgAppV2:
+ ok = p.msgAppV2Writer.attach(conn)
+ case streamTypeMessage:
+ ok = p.writer.attach(conn)
+ default:
+ if p.lg != nil {
+ p.lg.Panic("unknown stream type", zap.String("type", conn.t.String()))
+ } else {
+ plog.Panicf("unhandled stream type %s", conn.t)
+ }
+ }
+ if !ok {
+ conn.Close()
+ }
+}
+
+func (p *peer) activeSince() time.Time { return p.status.activeSince() }
+
+// Pause pauses the peer. The peer will simply drop all incoming
+// messages without returning an error.
+func (p *peer) Pause() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.paused = true
+ p.msgAppReader.pause()
+ p.msgAppV2Reader.pause()
+}
+
+// Resume resumes a paused peer.
+func (p *peer) Resume() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.paused = false
+ p.msgAppReader.resume()
+ p.msgAppV2Reader.resume()
+}
+
+func (p *peer) stop() {
+ if p.lg != nil {
+ p.lg.Info("stopping remote peer", zap.String("remote-peer-id", p.id.String()))
+ } else {
+ plog.Infof("stopping peer %s...", p.id)
+ }
+
+ defer func() {
+ if p.lg != nil {
+ p.lg.Info("stopped remote peer", zap.String("remote-peer-id", p.id.String()))
+ } else {
+ plog.Infof("stopped peer %s", p.id)
+ }
+ }()
+
+ close(p.stopc)
+ p.cancel()
+ p.msgAppV2Writer.stop()
+ p.writer.stop()
+ p.pipeline.stop()
+ p.snapSender.stop()
+ p.msgAppV2Reader.stop()
+ p.msgAppReader.stop()
+}
+
+// pick picks a chan for sending the given message. The picked chan and its
+// string name are returned.
+func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) {
+ var ok bool
+ // Since MsgSnap may be large (e.g., 1GB) and would block the stream
+ // for a long time, only use one of the N pipelines to send MsgSnap.
+ if isMsgSnap(m) {
+ return p.pipeline.msgc, pipelineMsg
+ } else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) {
+ return writec, streamAppV2
+ } else if writec, ok = p.writer.writec(); ok {
+ return writec, streamMsg
+ }
+ return p.pipeline.msgc, pipelineMsg
+}
+
+func isMsgApp(m raftpb.Message) bool { return m.Type == raftpb.MsgApp }
+
+func isMsgSnap(m raftpb.Message) bool { return m.Type == raftpb.MsgSnap }
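
peer.send above never blocks: if the picked channel's buffer is full, the message is dropped and raft is told the peer is unreachable, so raft can retry with its own backoff. A minimal sketch of that non-blocking send idiom, using a plain string message type:

package main

import "fmt"

// trySend mirrors the select-with-default in peer.send: enqueue if
// there is room, otherwise report a drop instead of blocking.
func trySend(writec chan string, m string) bool {
	select {
	case writec <- m:
		return true
	default:
		return false // buffer full; the caller reports unreachable and drops
	}
}

func main() {
	writec := make(chan string, 1)
	fmt.Println(trySend(writec, "msg1")) // true
	fmt.Println(trySend(writec, "msg2")) // false: buffer full, message dropped
}
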
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/peer_status.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/peer_status.go
new file mode 100644
index 000000000000..66149ff67d23
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/peer_status.go
@@ -0,0 +1,96 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/pkg/types"
+
+ "go.uber.org/zap"
+)
+
+type failureType struct {
+ source string
+ action string
+}
+
+type peerStatus struct {
+ lg *zap.Logger
+ local types.ID
+ id types.ID
+ mu sync.Mutex // protect variables below
+ active bool
+ since time.Time
+}
+
+func newPeerStatus(lg *zap.Logger, local, id types.ID) *peerStatus {
+ return &peerStatus{lg: lg, local: local, id: id}
+}
+
+func (s *peerStatus) activate() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.active {
+ if s.lg != nil {
+ s.lg.Info("peer became active", zap.String("peer-id", s.id.String()))
+ } else {
+ plog.Infof("peer %s became active", s.id)
+ }
+ s.active = true
+ s.since = time.Now()
+
+ activePeers.WithLabelValues(s.local.String(), s.id.String()).Inc()
+ }
+}
+
+func (s *peerStatus) deactivate(failure failureType, reason string) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ msg := fmt.Sprintf("failed to %s %s on %s (%s)", failure.action, s.id, failure.source, reason)
+ if s.active {
+ if s.lg != nil {
+ s.lg.Warn("peer became inactive (message send to peer failed)", zap.String("peer-id", s.id.String()), zap.Error(errors.New(msg)))
+ } else {
+ plog.Errorf(msg)
+ plog.Infof("peer %s became inactive (message send to peer failed)", s.id)
+ }
+ s.active = false
+ s.since = time.Time{}
+
+ activePeers.WithLabelValues(s.local.String(), s.id.String()).Dec()
+ disconnectedPeers.WithLabelValues(s.local.String(), s.id.String()).Inc()
+ return
+ }
+
+ if s.lg != nil {
+ s.lg.Debug("peer deactivated again", zap.String("peer-id", s.id.String()), zap.Error(errors.New(msg)))
+ }
+}
+
+func (s *peerStatus) isActive() bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.active
+}
+
+func (s *peerStatus) activeSince() time.Time {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.since
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/pipeline.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/pipeline.go
new file mode 100644
index 000000000000..70f92575d13e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/pipeline.go
@@ -0,0 +1,180 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "io/ioutil"
+ "sync"
+ "time"
+
+ stats "go.etcd.io/etcd/etcdserver/api/v2stats"
+ "go.etcd.io/etcd/pkg/pbutil"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/raft"
+ "go.etcd.io/etcd/raft/raftpb"
+
+ "go.uber.org/zap"
+)
+
+const (
+ connPerPipeline = 4
+ // pipelineBufSize is the size of the pipeline buffer, which helps absorb
+ // temporary network latency.
+ // The size ensures that the pipeline does not drop messages when the network
+ // is down for less than 1 second on the good path.
+ pipelineBufSize = 64
+)
+
+var errStopped = errors.New("stopped")
+
+type pipeline struct {
+ peerID types.ID
+
+ tr *Transport
+ picker *urlPicker
+ status *peerStatus
+ raft Raft
+ errorc chan error
+ // deprecate when we deprecate the v2 API
+ followerStats *stats.FollowerStats
+
+ msgc chan raftpb.Message
+ // wait for the handling routines
+ wg sync.WaitGroup
+ stopc chan struct{}
+}
+
+func (p *pipeline) start() {
+ p.stopc = make(chan struct{})
+ p.msgc = make(chan raftpb.Message, pipelineBufSize)
+ p.wg.Add(connPerPipeline)
+ for i := 0; i < connPerPipeline; i++ {
+ go p.handle()
+ }
+
+ if p.tr != nil && p.tr.Logger != nil {
+ p.tr.Logger.Info(
+ "started HTTP pipelining with remote peer",
+ zap.String("local-member-id", p.tr.ID.String()),
+ zap.String("remote-peer-id", p.peerID.String()),
+ )
+ } else {
+ plog.Infof("started HTTP pipelining with peer %s", p.peerID)
+ }
+}
+
+func (p *pipeline) stop() {
+ close(p.stopc)
+ p.wg.Wait()
+
+ if p.tr != nil && p.tr.Logger != nil {
+ p.tr.Logger.Info(
+ "stopped HTTP pipelining with remote peer",
+ zap.String("local-member-id", p.tr.ID.String()),
+ zap.String("remote-peer-id", p.peerID.String()),
+ )
+ } else {
+ plog.Infof("stopped HTTP pipelining with peer %s", p.peerID)
+ }
+}
+
+func (p *pipeline) handle() {
+ defer p.wg.Done()
+
+ for {
+ select {
+ case m := <-p.msgc:
+ start := time.Now()
+ err := p.post(pbutil.MustMarshal(&m))
+ end := time.Now()
+
+ if err != nil {
+ p.status.deactivate(failureType{source: pipelineMsg, action: "write"}, err.Error())
+
+ if m.Type == raftpb.MsgApp && p.followerStats != nil {
+ p.followerStats.Fail()
+ }
+ p.raft.ReportUnreachable(m.To)
+ if isMsgSnap(m) {
+ p.raft.ReportSnapshot(m.To, raft.SnapshotFailure)
+ }
+ sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
+ continue
+ }
+
+ p.status.activate()
+ if m.Type == raftpb.MsgApp && p.followerStats != nil {
+ p.followerStats.Succ(end.Sub(start))
+ }
+ if isMsgSnap(m) {
+ p.raft.ReportSnapshot(m.To, raft.SnapshotFinish)
+ }
+ sentBytes.WithLabelValues(types.ID(m.To).String()).Add(float64(m.Size()))
+ case <-p.stopc:
+ return
+ }
+ }
+}
+
+// post POSTs a data payload to a URL. It returns nil if the POST succeeds,
+// or an error on any failure.
+func (p *pipeline) post(data []byte) (err error) {
+ u := p.picker.pick()
+ req := createPostRequest(u, RaftPrefix, bytes.NewBuffer(data), "application/protobuf", p.tr.URLs, p.tr.ID, p.tr.ClusterID)
+
+ done := make(chan struct{}, 1)
+ ctx, cancel := context.WithCancel(context.Background())
+ req = req.WithContext(ctx)
+ go func() {
+ select {
+ case <-done:
+ case <-p.stopc:
+ waitSchedule()
+ cancel()
+ }
+ }()
+
+ resp, err := p.tr.pipelineRt.RoundTrip(req)
+ done <- struct{}{}
+ if err != nil {
+ p.picker.unreachable(u)
+ return err
+ }
+ defer resp.Body.Close()
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ p.picker.unreachable(u)
+ return err
+ }
+
+ err = checkPostResponse(resp, b, req, p.peerID)
+ if err != nil {
+ p.picker.unreachable(u)
+ // errMemberRemoved is a critical error since a removed member should
+ // always be stopped. So we use reportCriticalError to report it to errorc.
+ if err == errMemberRemoved {
+ reportCriticalError(err, p.errorc)
+ }
+ return err
+ }
+
+ return nil
+}
+
+// waitSchedule waits a while for other goroutines to be scheduled
+func waitSchedule() { time.Sleep(time.Millisecond) }
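
pipeline.post above ties each in-flight request to the pipeline's stop channel: a helper goroutine cancels the request context when stopc closes, and the buffered done channel lets the happy path signal completion without blocking. A sketch of the same shutdown pattern, assuming a generic http.Client in place of the transport's pipelineRt:

package main

import (
	"context"
	"fmt"
	"net/http"
	"strings"
)

func post(client *http.Client, url, body string, stopc <-chan struct{}) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, "POST", url, strings.NewReader(body))
	if err != nil {
		return err
	}

	done := make(chan struct{}, 1) // buffered: the send below never blocks
	go func() {
		select {
		case <-done: // request finished normally
		case <-stopc: // shutting down: abort the in-flight request
			cancel()
		}
	}()

	resp, err := client.Do(req)
	done <- struct{}{}
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	stopc := make(chan struct{})
	fmt.Println(post(http.DefaultClient, "http://127.0.0.1:2380/raft", "payload", stopc))
}
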
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/probing_status.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/probing_status.go
new file mode 100644
index 000000000000..474d9a0e437c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/probing_status.go
@@ -0,0 +1,104 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/xiang90/probing"
+ "go.uber.org/zap"
+)
+
+const (
+ // RoundTripperNameRaftMessage is the name of the round-tripper that sends
+ // all Raft messages other than "snap.Message".
+ RoundTripperNameRaftMessage = "ROUND_TRIPPER_RAFT_MESSAGE"
+ // RoundTripperNameSnapshot is the name of the round-tripper that sends the merged snapshot message.
+ RoundTripperNameSnapshot = "ROUND_TRIPPER_SNAPSHOT"
+)
+
+var (
+ // proberInterval must be shorter than the read timeout;
+ // otherwise the connection will time out.
+ proberInterval = ConnReadTimeout - time.Second
+ statusMonitoringInterval = 30 * time.Second
+ statusErrorInterval = 5 * time.Second
+)
+
+func addPeerToProber(lg *zap.Logger, p probing.Prober, id string, us []string, roundTripperName string, rttSecProm *prometheus.HistogramVec) {
+ hus := make([]string, len(us))
+ for i := range us {
+ hus[i] = us[i] + ProbingPrefix
+ }
+
+ p.AddHTTP(id, proberInterval, hus)
+
+ s, err := p.Status(id)
+ if err != nil {
+ if lg != nil {
+ lg.Warn("failed to add peer into prober", zap.String("remote-peer-id", id))
+ } else {
+ plog.Errorf("failed to add peer %s into prober", id)
+ }
+ return
+ }
+
+ go monitorProbingStatus(lg, s, id, roundTripperName, rttSecProm)
+}
+
+func monitorProbingStatus(lg *zap.Logger, s probing.Status, id string, roundTripperName string, rttSecProm *prometheus.HistogramVec) {
+ // Set the first interval short so that errors are logged early.
+ interval := statusErrorInterval
+ for {
+ select {
+ case <-time.After(interval):
+ if !s.Health() {
+ if lg != nil {
+ lg.Warn(
+ "prober detected unhealthy status",
+ zap.String("round-tripper-name", roundTripperName),
+ zap.String("remote-peer-id", id),
+ zap.Duration("rtt", s.SRTT()),
+ zap.Error(s.Err()),
+ )
+ } else {
+ plog.Warningf("health check for peer %s could not connect: %v", id, s.Err())
+ }
+ interval = statusErrorInterval
+ } else {
+ interval = statusMonitoringInterval
+ }
+ if s.ClockDiff() > time.Second {
+ if lg != nil {
+ lg.Warn(
+ "prober found high clock drift",
+ zap.String("round-tripper-name", roundTripperName),
+ zap.String("remote-peer-id", id),
+ zap.Duration("clock-drift", s.ClockDiff()),
+ zap.Duration("rtt", s.SRTT()),
+ zap.Error(s.Err()),
+ )
+ } else {
+ plog.Warningf("the clock difference against peer %s is too high [%v > %v]", id, s.ClockDiff(), time.Second)
+ }
+ }
+ rttSecProm.WithLabelValues(id).Observe(s.SRTT().Seconds())
+
+ case <-s.StopNotify():
+ return
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/remote.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/remote.go
new file mode 100644
index 000000000000..1ef2493ed45c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/remote.go
@@ -0,0 +1,99 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/raft/raftpb"
+
+ "go.uber.org/zap"
+)
+
+type remote struct {
+ lg *zap.Logger
+ localID types.ID
+ id types.ID
+ status *peerStatus
+ pipeline *pipeline
+}
+
+func startRemote(tr *Transport, urls types.URLs, id types.ID) *remote {
+ picker := newURLPicker(urls)
+ status := newPeerStatus(tr.Logger, tr.ID, id)
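+ // A remote carries only a pipeline (no streams and no follower stats);
+ // it exists solely to help a newly joined member catch up.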
+ pipeline := &pipeline{
+ peerID: id,
+ tr: tr,
+ picker: picker,
+ status: status,
+ raft: tr.Raft,
+ errorc: tr.ErrorC,
+ }
+ pipeline.start()
+
+ return &remote{
+ lg: tr.Logger,
+ localID: tr.ID,
+ id: id,
+ status: status,
+ pipeline: pipeline,
+ }
+}
+
+func (g *remote) send(m raftpb.Message) {
+ select {
+ case g.pipeline.msgc <- m:
+ default:
+ if g.status.isActive() {
+ if g.lg != nil {
+ g.lg.Warn(
+ "dropped internal Raft message since sending buffer is full (overloaded network)",
+ zap.String("message-type", m.Type.String()),
+ zap.String("local-member-id", g.localID.String()),
+ zap.String("from", types.ID(m.From).String()),
+ zap.String("remote-peer-id", g.id.String()),
+ zap.Bool("remote-peer-active", g.status.isActive()),
+ )
+ } else {
+ plog.MergeWarningf("dropped internal raft message to %s since sending buffer is full (bad/overloaded network)", g.id)
+ }
+ } else {
+ if g.lg != nil {
+ g.lg.Warn(
+ "dropped Raft message since sending buffer is full (overloaded network)",
+ zap.String("message-type", m.Type.String()),
+ zap.String("local-member-id", g.localID.String()),
+ zap.String("from", types.ID(m.From).String()),
+ zap.String("remote-peer-id", g.id.String()),
+ zap.Bool("remote-peer-active", g.status.isActive()),
+ )
+ } else {
+ plog.Debugf("dropped %s to %s since sending buffer is full", m.Type, g.id)
+ }
+ }
+ sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
+ }
+}
+
+func (g *remote) stop() {
+ g.pipeline.stop()
+}
+
+func (g *remote) Pause() {
+ g.stop()
+}
+
+func (g *remote) Resume() {
+ g.pipeline.start()
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/snapshot_sender.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/snapshot_sender.go
new file mode 100644
index 000000000000..7105de188ae7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/snapshot_sender.go
@@ -0,0 +1,209 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/snap"
+ "go.etcd.io/etcd/pkg/httputil"
+ pioutil "go.etcd.io/etcd/pkg/ioutil"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/raft"
+
+ "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+)
+
+var (
+ // timeout for reading snapshot response body
+ snapResponseReadTimeout = 5 * time.Second
+)
+
+type snapshotSender struct {
+ from, to types.ID
+ cid types.ID
+
+ tr *Transport
+ picker *urlPicker
+ status *peerStatus
+ r Raft
+ errorc chan error
+
+ stopc chan struct{}
+}
+
+func newSnapshotSender(tr *Transport, picker *urlPicker, to types.ID, status *peerStatus) *snapshotSender {
+ return &snapshotSender{
+ from: tr.ID,
+ to: to,
+ cid: tr.ClusterID,
+ tr: tr,
+ picker: picker,
+ status: status,
+ r: tr.Raft,
+ errorc: tr.ErrorC,
+ stopc: make(chan struct{}),
+ }
+}
+
+func (s *snapshotSender) stop() { close(s.stopc) }
+
+func (s *snapshotSender) send(merged snap.Message) {
+ start := time.Now()
+
+ m := merged.Message
+ to := types.ID(m.To).String()
+
+ body := createSnapBody(s.tr.Logger, merged)
+ defer body.Close()
+
+ u := s.picker.pick()
+ req := createPostRequest(u, RaftSnapshotPrefix, body, "application/octet-stream", s.tr.URLs, s.from, s.cid)
+
+ snapshotTotalSizeVal := uint64(merged.TotalSize)
+ snapshotTotalSize := humanize.Bytes(snapshotTotalSizeVal)
+ if s.tr.Logger != nil {
+ s.tr.Logger.Info(
+ "sending database snapshot",
+ zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index),
+ zap.String("remote-peer-id", to),
+ zap.Int64("bytes", merged.TotalSize),
+ zap.String("size", snapshotTotalSize),
+ )
+ } else {
+ plog.Infof("start to send database snapshot [index: %d, to %s, size %s]...", m.Snapshot.Metadata.Index, types.ID(m.To), snapshotTotalSize)
+ }
+
+ snapshotSendInflights.WithLabelValues(to).Inc()
+ defer func() {
+ snapshotSendInflights.WithLabelValues(to).Dec()
+ }()
+
+ err := s.post(req)
+ defer merged.CloseWithError(err)
+ if err != nil {
+ if s.tr.Logger != nil {
+ s.tr.Logger.Warn(
+ "failed to send database snapshot",
+ zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index),
+ zap.String("remote-peer-id", to),
+ zap.Int64("bytes", merged.TotalSize),
+ zap.String("size", snapshotTotalSize),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("database snapshot [index: %d, to: %s] failed to be sent out (%v)", m.Snapshot.Metadata.Index, types.ID(m.To), err)
+ }
+
+ // errMemberRemoved is a critical error since a removed member should
+ // always be stopped. So we use reportCriticalError to report it to errorc.
+ if err == errMemberRemoved {
+ reportCriticalError(err, s.errorc)
+ }
+
+ s.picker.unreachable(u)
+ s.status.deactivate(failureType{source: sendSnap, action: "post"}, err.Error())
+ s.r.ReportUnreachable(m.To)
+ // Report SnapshotFailure to the raft state machine. Once the raft state
+ // machine knows about it, it will pause for a while and retry sending
+ // a new snapshot message.
+ s.r.ReportSnapshot(m.To, raft.SnapshotFailure)
+ sentFailures.WithLabelValues(to).Inc()
+ snapshotSendFailures.WithLabelValues(to).Inc()
+ return
+ }
+ s.status.activate()
+ s.r.ReportSnapshot(m.To, raft.SnapshotFinish)
+
+ if s.tr.Logger != nil {
+ s.tr.Logger.Info(
+ "sent database snapshot",
+ zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index),
+ zap.String("remote-peer-id", to),
+ zap.Int64("bytes", merged.TotalSize),
+ zap.String("size", snapshotTotalSize),
+ )
+ } else {
+ plog.Infof("database snapshot [index: %d, to: %s] sent out successfully", m.Snapshot.Metadata.Index, types.ID(m.To))
+ }
+
+ sentBytes.WithLabelValues(to).Add(float64(merged.TotalSize))
+ snapshotSend.WithLabelValues(to).Inc()
+ snapshotSendSeconds.WithLabelValues(to).Observe(time.Since(start).Seconds())
+}
+
+// post posts the given request.
+// It returns nil when the request is sent out and processed successfully.
+func (s *snapshotSender) post(req *http.Request) (err error) {
+ ctx, cancel := context.WithCancel(context.Background())
+ req = req.WithContext(ctx)
+ defer cancel()
+
+ type responseAndError struct {
+ resp *http.Response
+ body []byte
+ err error
+ }
+ result := make(chan responseAndError, 1)
+
+ go func() {
+ resp, err := s.tr.pipelineRt.RoundTrip(req)
+ if err != nil {
+ result <- responseAndError{resp, nil, err}
+ return
+ }
+
+ // Close the response body when the read times out. This prevents reading
+ // the body forever when the other side dies right after it successfully
+ // receives the request body.
+ time.AfterFunc(snapResponseReadTimeout, func() { httputil.GracefulClose(resp) })
+ body, err := ioutil.ReadAll(resp.Body)
+ result <- responseAndError{resp, body, err}
+ }()
+
+ select {
+ case <-s.stopc:
+ return errStopped
+ case r := <-result:
+ if r.err != nil {
+ return r.err
+ }
+ return checkPostResponse(r.resp, r.body, req, s.to)
+ }
+}
+
+func createSnapBody(lg *zap.Logger, merged snap.Message) io.ReadCloser {
+ buf := new(bytes.Buffer)
+ enc := &messageEncoder{w: buf}
+ // encode raft message
+ if err := enc.encode(&merged.Message); err != nil {
+ if lg != nil {
+ lg.Panic("failed to encode message", zap.Error(err))
+ } else {
+ plog.Panicf("encode message error (%v)", err)
+ }
+ }
+
+ return &pioutil.ReaderAndCloser{
+ Reader: io.MultiReader(buf, merged.ReadCloser),
+ Closer: merged.ReadCloser,
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/stream.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/stream.go
new file mode 100644
index 000000000000..cf7d8ccf62cf
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/stream.go
@@ -0,0 +1,747 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "path"
+ "strings"
+ "sync"
+ "time"
+
+ stats "go.etcd.io/etcd/etcdserver/api/v2stats"
+ "go.etcd.io/etcd/pkg/httputil"
+ "go.etcd.io/etcd/pkg/transport"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/raft/raftpb"
+ "go.etcd.io/etcd/version"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+ "golang.org/x/time/rate"
+)
+
+const (
+ streamTypeMessage streamType = "message"
+ streamTypeMsgAppV2 streamType = "msgappv2"
+
+ streamBufSize = 4096
+)
+
+var (
+ errUnsupportedStreamType = fmt.Errorf("unsupported stream type")
+
+ // the key is in string format "major.minor.patch"
+ supportedStream = map[string][]streamType{
+ "2.0.0": {},
+ "2.1.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "2.2.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "2.3.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "3.0.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "3.1.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "3.2.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "3.3.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "3.4.0": {streamTypeMsgAppV2, streamTypeMessage},
+ }
+)
+
+type streamType string
+
+func (t streamType) endpoint() string {
+ switch t {
+ case streamTypeMsgAppV2:
+ return path.Join(RaftStreamPrefix, "msgapp")
+ case streamTypeMessage:
+ return path.Join(RaftStreamPrefix, "message")
+ default:
+ plog.Panicf("unhandled stream type %v", t)
+ return ""
+ }
+}
+
+func (t streamType) String() string {
+ switch t {
+ case streamTypeMsgAppV2:
+ return "stream MsgApp v2"
+ case streamTypeMessage:
+ return "stream Message"
+ default:
+ return "unknown stream"
+ }
+}
+
+var (
+ // linkHeartbeatMessage is a special message used as a heartbeat message in
+ // the link layer. It never conflicts with messages from raft because raft
+ // doesn't send out messages without From and To fields.
+ linkHeartbeatMessage = raftpb.Message{Type: raftpb.MsgHeartbeat}
+)
+
+func isLinkHeartbeatMessage(m *raftpb.Message) bool {
+ return m.Type == raftpb.MsgHeartbeat && m.From == 0 && m.To == 0
+}
+
+type outgoingConn struct {
+ t streamType
+ io.Writer
+ http.Flusher
+ io.Closer
+
+ localID types.ID
+ peerID types.ID
+}
+
+// streamWriter writes messages to the attached outgoingConn.
+type streamWriter struct {
+ lg *zap.Logger
+
+ localID types.ID
+ peerID types.ID
+
+ status *peerStatus
+ fs *stats.FollowerStats
+ r Raft
+
+ mu sync.Mutex // guards the working and closer fields
+ closer io.Closer
+ working bool
+
+ msgc chan raftpb.Message
+ connc chan *outgoingConn
+ stopc chan struct{}
+ done chan struct{}
+}
+
+// startStreamWriter creates a streamWriter and starts a long-running goroutine that accepts
+// messages and writes them to the attached outgoing connection.
+func startStreamWriter(lg *zap.Logger, local, id types.ID, status *peerStatus, fs *stats.FollowerStats, r Raft) *streamWriter {
+ w := &streamWriter{
+ lg: lg,
+
+ localID: local,
+ peerID: id,
+
+ status: status,
+ fs: fs,
+ r: r,
+ msgc: make(chan raftpb.Message, streamBufSize),
+ connc: make(chan *outgoingConn),
+ stopc: make(chan struct{}),
+ done: make(chan struct{}),
+ }
+ go w.run()
+ return w
+}
+
+func (cw *streamWriter) run() {
+ var (
+ msgc chan raftpb.Message
+ heartbeatc <-chan time.Time
+ t streamType
+ enc encoder
+ flusher http.Flusher
+ batched int
+ )
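+ // Send a link-layer heartbeat on each tick; ConnReadTimeout/3 keeps the
+ // heartbeat interval safely below the remote peer's read timeout.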
+ tickc := time.NewTicker(ConnReadTimeout / 3)
+ defer tickc.Stop()
+ unflushed := 0
+
+ if cw.lg != nil {
+ cw.lg.Info(
+ "started stream writer with remote peer",
+ zap.String("local-member-id", cw.localID.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ } else {
+ plog.Infof("started streaming with peer %s (writer)", cw.peerID)
+ }
+
+ for {
+ select {
+ case <-heartbeatc:
+ err := enc.encode(&linkHeartbeatMessage)
+ unflushed += linkHeartbeatMessage.Size()
+ if err == nil {
+ flusher.Flush()
+ batched = 0
+ sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
+ unflushed = 0
+ continue
+ }
+
+ cw.status.deactivate(failureType{source: t.String(), action: "heartbeat"}, err.Error())
+
+ sentFailures.WithLabelValues(cw.peerID.String()).Inc()
+ cw.close()
+ if cw.lg != nil {
+ cw.lg.Warn(
+ "lost TCP streaming connection with remote peer",
+ zap.String("stream-writer-type", t.String()),
+ zap.String("local-member-id", cw.localID.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ } else {
+ plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
+ }
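+ // Nil channels disable these select cases until a new connection
+ // is attached via connc.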
+ heartbeatc, msgc = nil, nil
+
+ case m := <-msgc:
+ err := enc.encode(&m)
+ if err == nil {
+ unflushed += m.Size()
+
+ if len(msgc) == 0 || batched > streamBufSize/2 {
+ flusher.Flush()
+ sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
+ unflushed = 0
+ batched = 0
+ } else {
+ batched++
+ }
+
+ continue
+ }
+
+ cw.status.deactivate(failureType{source: t.String(), action: "write"}, err.Error())
+ cw.close()
+ if cw.lg != nil {
+ cw.lg.Warn(
+ "lost TCP streaming connection with remote peer",
+ zap.String("stream-writer-type", t.String()),
+ zap.String("local-member-id", cw.localID.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ } else {
+ plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
+ }
+ heartbeatc, msgc = nil, nil
+ cw.r.ReportUnreachable(m.To)
+ sentFailures.WithLabelValues(cw.peerID.String()).Inc()
+
+ case conn := <-cw.connc:
+ cw.mu.Lock()
+ closed := cw.closeUnlocked()
+ t = conn.t
+ switch conn.t {
+ case streamTypeMsgAppV2:
+ enc = newMsgAppV2Encoder(conn.Writer, cw.fs)
+ case streamTypeMessage:
+ enc = &messageEncoder{w: conn.Writer}
+ default:
+ plog.Panicf("unhandled stream type %s", conn.t)
+ }
+ if cw.lg != nil {
+ cw.lg.Info(
+ "set message encoder",
+ zap.String("from", conn.localID.String()),
+ zap.String("to", conn.peerID.String()),
+ zap.String("stream-type", t.String()),
+ )
+ }
+ flusher = conn.Flusher
+ unflushed = 0
+ cw.status.activate()
+ cw.closer = conn.Closer
+ cw.working = true
+ cw.mu.Unlock()
+
+ if closed {
+ if cw.lg != nil {
+ cw.lg.Warn(
+ "closed TCP streaming connection with remote peer",
+ zap.String("stream-writer-type", t.String()),
+ zap.String("local-member-id", cw.localID.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ } else {
+ plog.Warningf("closed an existing TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
+ }
+ }
+ if cw.lg != nil {
+ cw.lg.Warn(
+ "established TCP streaming connection with remote peer",
+ zap.String("stream-writer-type", t.String()),
+ zap.String("local-member-id", cw.localID.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ } else {
+ plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
+ }
+ heartbeatc, msgc = tickc.C, cw.msgc
+
+ case <-cw.stopc:
+ if cw.close() {
+ if cw.lg != nil {
+ cw.lg.Warn(
+ "closed TCP streaming connection with remote peer",
+ zap.String("stream-writer-type", t.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ } else {
+ plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
+ }
+ }
+ if cw.lg != nil {
+ cw.lg.Warn(
+ "stopped TCP streaming connection with remote peer",
+ zap.String("stream-writer-type", t.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ } else {
+ plog.Infof("stopped streaming with peer %s (writer)", cw.peerID)
+ }
+ close(cw.done)
+ return
+ }
+ }
+}
+
+func (cw *streamWriter) writec() (chan<- raftpb.Message, bool) {
+ cw.mu.Lock()
+ defer cw.mu.Unlock()
+ return cw.msgc, cw.working
+}
+
+func (cw *streamWriter) close() bool {
+ cw.mu.Lock()
+ defer cw.mu.Unlock()
+ return cw.closeUnlocked()
+}
+
+func (cw *streamWriter) closeUnlocked() bool {
+ if !cw.working {
+ return false
+ }
+ if err := cw.closer.Close(); err != nil {
+ if cw.lg != nil {
+ cw.lg.Warn(
+ "failed to close connection with remote peer",
+ zap.String("remote-peer-id", cw.peerID.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("peer %s (writer) connection close error: %v", cw.peerID, err)
+ }
+ }
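+ // Messages still buffered in msgc are dropped when the channel is
+ // replaced below; report the peer unreachable so raft retransmits.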
+ if len(cw.msgc) > 0 {
+ cw.r.ReportUnreachable(uint64(cw.peerID))
+ }
+ cw.msgc = make(chan raftpb.Message, streamBufSize)
+ cw.working = false
+ return true
+}
+
+func (cw *streamWriter) attach(conn *outgoingConn) bool {
+ select {
+ case cw.connc <- conn:
+ return true
+ case <-cw.done:
+ return false
+ }
+}
+
+func (cw *streamWriter) stop() {
+ close(cw.stopc)
+ <-cw.done
+}
+
+// streamReader runs a long-running goroutine that dials the remote stream
+// endpoint and reads messages from the returned response body.
+type streamReader struct {
+ lg *zap.Logger
+
+ peerID types.ID
+ typ streamType
+
+ tr *Transport
+ picker *urlPicker
+ status *peerStatus
+ recvc chan<- raftpb.Message
+ propc chan<- raftpb.Message
+
+ rl *rate.Limiter // limits the frequency of dial retry attempts
+
+ errorc chan<- error
+
+ mu sync.Mutex
+ paused bool
+ closer io.Closer
+
+ ctx context.Context
+ cancel context.CancelFunc
+ done chan struct{}
+}
+
+func (cr *streamReader) start() {
+ cr.done = make(chan struct{})
+ if cr.errorc == nil {
+ cr.errorc = cr.tr.ErrorC
+ }
+ if cr.ctx == nil {
+ cr.ctx, cr.cancel = context.WithCancel(context.Background())
+ }
+ go cr.run()
+}
+
+func (cr *streamReader) run() {
+ t := cr.typ
+
+ if cr.lg != nil {
+ cr.lg.Info(
+ "started stream reader with remote peer",
+ zap.String("stream-reader-type", t.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ )
+ } else {
+ plog.Infof("started streaming with peer %s (%s reader)", cr.peerID, t)
+ }
+
+ for {
+ rc, err := cr.dial(t)
+ if err != nil {
+ if err != errUnsupportedStreamType {
+ cr.status.deactivate(failureType{source: t.String(), action: "dial"}, err.Error())
+ }
+ } else {
+ cr.status.activate()
+ if cr.lg != nil {
+ cr.lg.Info(
+ "established TCP streaming connection with remote peer",
+ zap.String("stream-reader-type", cr.typ.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ )
+ } else {
+ plog.Infof("established a TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ)
+ }
+ err = cr.decodeLoop(rc, t)
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "lost TCP streaming connection with remote peer",
+ zap.String("stream-reader-type", cr.typ.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("lost the TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ)
+ }
+ switch {
+ // all data is read out
+ case err == io.EOF:
+ // connection is closed by the remote
+ case transport.IsClosedConnError(err):
+ default:
+ cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error())
+ }
+ }
+ // Wait for a while before the next dial attempt.
+ err = cr.rl.Wait(cr.ctx)
+ if cr.ctx.Err() != nil {
+ if cr.lg != nil {
+ cr.lg.Info(
+ "stopped stream reader with remote peer",
+ zap.String("stream-reader-type", t.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ )
+ } else {
+ plog.Infof("stopped streaming with peer %s (%s reader)", cr.peerID, t)
+ }
+ close(cr.done)
+ return
+ }
+ if err != nil {
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "rate limit on stream reader with remote peer",
+ zap.String("stream-reader-type", t.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("streaming with peer %s (%s reader) rate limiter error: %v", cr.peerID, t, err)
+ }
+ }
+ }
+}
+
+func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
+ var dec decoder
+ cr.mu.Lock()
+ switch t {
+ case streamTypeMsgAppV2:
+ dec = newMsgAppV2Decoder(rc, cr.tr.ID, cr.peerID)
+ case streamTypeMessage:
+ dec = &messageDecoder{r: rc}
+ default:
+ if cr.lg != nil {
+ cr.lg.Panic("unknown stream type", zap.String("type", t.String()))
+ } else {
+ plog.Panicf("unhandled stream type %s", t)
+ }
+ }
+ select {
+ case <-cr.ctx.Done():
+ cr.mu.Unlock()
+ if err := rc.Close(); err != nil {
+ return err
+ }
+ return io.EOF
+ default:
+ cr.closer = rc
+ }
+ cr.mu.Unlock()
+
+ // gofail: labelRaftDropHeartbeat:
+ for {
+ m, err := dec.decode()
+ if err != nil {
+ cr.mu.Lock()
+ cr.close()
+ cr.mu.Unlock()
+ return err
+ }
+
+ // gofail-go: var raftDropHeartbeat struct{}
+ // continue labelRaftDropHeartbeat
+ receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size()))
+
+ cr.mu.Lock()
+ paused := cr.paused
+ cr.mu.Unlock()
+
+ if paused {
+ continue
+ }
+
+ if isLinkHeartbeatMessage(&m) {
+ // raft is not interested in link layer
+ // heartbeat message, so we should ignore
+ // it.
+ continue
+ }
+
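+ // Proposals are forwarded on a dedicated channel so that a slow
+ // proposal path cannot block other incoming raft messages.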
+ recvc := cr.recvc
+ if m.Type == raftpb.MsgProp {
+ recvc = cr.propc
+ }
+
+ select {
+ case recvc <- m:
+ default:
+ if cr.status.isActive() {
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "dropped internal Raft message since receiving buffer is full (overloaded network)",
+ zap.String("message-type", m.Type.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("from", types.ID(m.From).String()),
+ zap.String("remote-peer-id", types.ID(m.To).String()),
+ zap.Bool("remote-peer-active", cr.status.isActive()),
+ )
+ } else {
+ plog.MergeWarningf("dropped internal raft message from %s since receiving buffer is full (overloaded network)", types.ID(m.From))
+ }
+ } else {
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "dropped Raft message since receiving buffer is full (overloaded network)",
+ zap.String("message-type", m.Type.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("from", types.ID(m.From).String()),
+ zap.String("remote-peer-id", types.ID(m.To).String()),
+ zap.Bool("remote-peer-active", cr.status.isActive()),
+ )
+ } else {
+ plog.Debugf("dropped %s from %s since receiving buffer is full", m.Type, types.ID(m.From))
+ }
+ }
+ recvFailures.WithLabelValues(types.ID(m.From).String()).Inc()
+ }
+ }
+}
+
+func (cr *streamReader) stop() {
+ cr.mu.Lock()
+ cr.cancel()
+ cr.close()
+ cr.mu.Unlock()
+ <-cr.done
+}
+
+func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) {
+ u := cr.picker.pick()
+ uu := u
+ uu.Path = path.Join(t.endpoint(), cr.tr.ID.String())
+
+ if cr.lg != nil {
+ cr.lg.Debug(
+ "dial stream reader",
+ zap.String("from", cr.tr.ID.String()),
+ zap.String("to", cr.peerID.String()),
+ zap.String("address", uu.String()),
+ )
+ }
+ req, err := http.NewRequest("GET", uu.String(), nil)
+ if err != nil {
+ cr.picker.unreachable(u)
+ return nil, fmt.Errorf("failed to make http request to %v (%v)", u, err)
+ }
+ req.Header.Set("X-Server-From", cr.tr.ID.String())
+ req.Header.Set("X-Server-Version", version.Version)
+ req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion)
+ req.Header.Set("X-Etcd-Cluster-ID", cr.tr.ClusterID.String())
+ req.Header.Set("X-Raft-To", cr.peerID.String())
+
+ setPeerURLsHeader(req, cr.tr.URLs)
+
+ req = req.WithContext(cr.ctx)
+
+ cr.mu.Lock()
+ select {
+ case <-cr.ctx.Done():
+ cr.mu.Unlock()
+ return nil, fmt.Errorf("stream reader is stopped")
+ default:
+ }
+ cr.mu.Unlock()
+
+ resp, err := cr.tr.streamRt.RoundTrip(req)
+ if err != nil {
+ cr.picker.unreachable(u)
+ return nil, err
+ }
+
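+ // Reject the stream if the remote peer is older and does not advertise
+ // support for this stream type.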
+ rv := serverVersion(resp.Header)
+ lv := semver.Must(semver.NewVersion(version.Version))
+ if compareMajorMinorVersion(rv, lv) == -1 && !checkStreamSupport(rv, t) {
+ httputil.GracefulClose(resp)
+ cr.picker.unreachable(u)
+ return nil, errUnsupportedStreamType
+ }
+
+ switch resp.StatusCode {
+ case http.StatusGone:
+ httputil.GracefulClose(resp)
+ cr.picker.unreachable(u)
+ reportCriticalError(errMemberRemoved, cr.errorc)
+ return nil, errMemberRemoved
+
+ case http.StatusOK:
+ return resp.Body, nil
+
+ case http.StatusNotFound:
+ httputil.GracefulClose(resp)
+ cr.picker.unreachable(u)
+ return nil, fmt.Errorf("peer %s failed to find local node %s", cr.peerID, cr.tr.ID)
+
+ case http.StatusPreconditionFailed:
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ cr.picker.unreachable(u)
+ return nil, err
+ }
+ httputil.GracefulClose(resp)
+ cr.picker.unreachable(u)
+
+ switch strings.TrimSuffix(string(b), "\n") {
+ case errIncompatibleVersion.Error():
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "request sent was ignored by remote peer due to server version incompatibility",
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ zap.Error(errIncompatibleVersion),
+ )
+ } else {
+ plog.Errorf("request sent was ignored by peer %s (server version incompatible)", cr.peerID)
+ }
+ return nil, errIncompatibleVersion
+
+ case errClusterIDMismatch.Error():
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "request sent was ignored by remote peer due to cluster ID mismatch",
+ zap.String("remote-peer-id", cr.peerID.String()),
+ zap.String("remote-peer-cluster-id", resp.Header.Get("X-Etcd-Cluster-ID")),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("local-member-cluster-id", cr.tr.ClusterID.String()),
+ zap.Error(errClusterIDMismatch),
+ )
+ } else {
+ plog.Errorf("request sent was ignored (cluster ID mismatch: peer[%s]=%s, local=%s)",
+ cr.peerID, resp.Header.Get("X-Etcd-Cluster-ID"), cr.tr.ClusterID)
+ }
+ return nil, errClusterIDMismatch
+
+ default:
+ return nil, fmt.Errorf("unhandled error %q when precondition failed", string(b))
+ }
+
+ default:
+ httputil.GracefulClose(resp)
+ cr.picker.unreachable(u)
+ return nil, fmt.Errorf("unhandled http status %d", resp.StatusCode)
+ }
+}
+
+func (cr *streamReader) close() {
+ if cr.closer != nil {
+ if err := cr.closer.Close(); err != nil {
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "failed to close remote peer connection",
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("peer %s (reader) connection close error: %v", cr.peerID, err)
+ }
+ }
+ }
+ cr.closer = nil
+}
+
+func (cr *streamReader) pause() {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+ cr.paused = true
+}
+
+func (cr *streamReader) resume() {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+ cr.paused = false
+}
+
+// checkStreamSupport checks whether the stream type is supported in the
+// given version.
+func checkStreamSupport(v *semver.Version, t streamType) bool {
+ nv := &semver.Version{Major: v.Major, Minor: v.Minor}
+ for _, s := range supportedStream[nv.String()] {
+ if s == t {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/transport.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/transport.go
new file mode 100644
index 000000000000..7191c3d6063f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/transport.go
@@ -0,0 +1,467 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "context"
+ "net/http"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/snap"
+ stats "go.etcd.io/etcd/etcdserver/api/v2stats"
+ "go.etcd.io/etcd/pkg/logutil"
+ "go.etcd.io/etcd/pkg/transport"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/raft"
+ "go.etcd.io/etcd/raft/raftpb"
+
+ "github.com/coreos/pkg/capnslog"
+ "github.com/xiang90/probing"
+ "go.uber.org/zap"
+ "golang.org/x/time/rate"
+)
+
+var plog = logutil.NewMergeLogger(capnslog.NewPackageLogger("go.etcd.io/etcd", "rafthttp"))
+
+type Raft interface {
+ Process(ctx context.Context, m raftpb.Message) error
+ IsIDRemoved(id uint64) bool
+ ReportUnreachable(id uint64)
+ ReportSnapshot(id uint64, status raft.SnapshotStatus)
+}
+
+type Transporter interface {
+ // Start starts the given Transporter.
+ // Start MUST be called before calling other functions in the interface.
+ Start() error
+ // Handler returns the HTTP handler of the transporter.
+ // A transporter HTTP handler handles the HTTP requests
+ // from remote peers.
+ // The handler MUST be used to handle RaftPrefix(/raft)
+ // endpoint.
+ Handler() http.Handler
+ // Send sends out the given messages to the remote peers.
+ // Each message has a To field, which is an id that maps
+ // to an existing peer in the transport.
+ // If the id cannot be found in the transport, the message
+ // will be ignored.
+ Send(m []raftpb.Message)
+ // SendSnapshot sends out the given snapshot message to a remote peer.
+ // The behavior of SendSnapshot is similar to Send.
+ SendSnapshot(m snap.Message)
+ // AddRemote adds a remote with given peer urls into the transport.
+ // A remote helps a newly joined member catch up with the progress of the
+ // cluster, and will not be used after that.
+ // It is the caller's responsibility to ensure the urls are all valid,
+ // or it panics.
+ AddRemote(id types.ID, urls []string)
+ // AddPeer adds a peer with given peer urls into the transport.
+ // It is the caller's responsibility to ensure the urls are all valid,
+ // or it panics.
+ // Peer urls are used to connect to the remote peer.
+ AddPeer(id types.ID, urls []string)
+ // RemovePeer removes the peer with given id.
+ RemovePeer(id types.ID)
+ // RemoveAllPeers removes all the existing peers in the transport.
+ RemoveAllPeers()
+ // UpdatePeer updates the peer urls of the peer with the given id.
+ // It is the caller's responsibility to ensure the urls are all valid,
+ // or it panics.
+ UpdatePeer(id types.ID, urls []string)
+ // ActiveSince returns the time that the connection with the peer
+ // of the given id becomes active.
+ // If the connection has been active since the peer was added,
+ // it returns the time at which the peer was added.
+ // If the connection is currently inactive, it returns zero time.
+ ActiveSince(id types.ID) time.Time
+ // ActivePeers returns the number of active peers.
+ ActivePeers() int
+ // Stop closes the connections and stops the transporter.
+ Stop()
+}
+
+// Transport implements Transporter interface. It provides the functionality
+// to send raft messages to peers, and receive raft messages from peers.
+// Users should call the Handler method to get a handler to serve requests
+// received from peerURLs.
+// Users need to call Start before calling other functions, and call
+// Stop when the Transport is no longer used.
+type Transport struct {
+ Logger *zap.Logger
+
+ DialTimeout time.Duration // maximum duration before timing out dial of the request
+ // DialRetryFrequency defines the frequency of streamReader dial retry attempts;
+ // a distinct rate limiter is created per peer (default value: 10 events/sec)
+ DialRetryFrequency rate.Limit
+
+ TLSInfo transport.TLSInfo // TLS information used when creating connection
+
+ ID types.ID // local member ID
+ URLs types.URLs // local peer URLs
+ ClusterID types.ID // raft cluster ID for request validation
+ Raft Raft // raft state machine, to which the Transport forwards received messages and reports status
+ Snapshotter *snap.Snapshotter
+ ServerStats *stats.ServerStats // used to record general transportation statistics
+ // used to record transportation statistics with followers when
+ // performing as leader in raft protocol
+ LeaderStats *stats.LeaderStats
+ // ErrorC is used to report detected critical errors, e.g.,
+ // the member has been permanently removed from the cluster
+ // When an error is received from ErrorC, user should stop raft state
+ // machine and thus stop the Transport.
+ ErrorC chan error
+
+ streamRt http.RoundTripper // roundTripper used by streams
+ pipelineRt http.RoundTripper // roundTripper used by pipelines
+
+ mu sync.RWMutex // protect the remote and peer map
+ remotes map[types.ID]*remote // remotes map that helps newly joined member to catch up
+ peers map[types.ID]Peer // peers map
+
+ pipelineProber probing.Prober
+ streamProber probing.Prober
+}
+
+func (t *Transport) Start() error {
+ var err error
+ t.streamRt, err = newStreamRoundTripper(t.TLSInfo, t.DialTimeout)
+ if err != nil {
+ return err
+ }
+ t.pipelineRt, err = NewRoundTripper(t.TLSInfo, t.DialTimeout)
+ if err != nil {
+ return err
+ }
+ t.remotes = make(map[types.ID]*remote)
+ t.peers = make(map[types.ID]Peer)
+ t.pipelineProber = probing.NewProber(t.pipelineRt)
+ t.streamProber = probing.NewProber(t.streamRt)
+
+ // If the client did not provide a dial retry frequency, use the default
+ // (100ms backoff between attempts to create a new stream),
+ // which keeps the retry overhead low.
+ if t.DialRetryFrequency == 0 {
+ t.DialRetryFrequency = rate.Every(100 * time.Millisecond)
+ }
+ return nil
+}
+
+func (t *Transport) Handler() http.Handler {
+ pipelineHandler := newPipelineHandler(t, t.Raft, t.ClusterID)
+ streamHandler := newStreamHandler(t, t, t.Raft, t.ID, t.ClusterID)
+ snapHandler := newSnapshotHandler(t, t.Raft, t.Snapshotter, t.ClusterID)
+ mux := http.NewServeMux()
+ mux.Handle(RaftPrefix, pipelineHandler)
+ mux.Handle(RaftStreamPrefix+"/", streamHandler)
+ mux.Handle(RaftSnapshotPrefix, snapHandler)
+ mux.Handle(ProbingPrefix, probing.NewHandler())
+ return mux
+}
+
+func (t *Transport) Get(id types.ID) Peer {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ return t.peers[id]
+}
+
+func (t *Transport) Send(msgs []raftpb.Message) {
+ for _, m := range msgs {
+ if m.To == 0 {
+ // ignore intentionally dropped message
+ continue
+ }
+ to := types.ID(m.To)
+
+ t.mu.RLock()
+ p, pok := t.peers[to]
+ g, rok := t.remotes[to]
+ t.mu.RUnlock()
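+ // Prefer an established peer; fall back to a remote, which only serves
+ // to help a new member catch up and has no streams.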
+
+ if pok {
+ if m.Type == raftpb.MsgApp {
+ t.ServerStats.SendAppendReq(m.Size())
+ }
+ p.send(m)
+ continue
+ }
+
+ if rok {
+ g.send(m)
+ continue
+ }
+
+ if t.Logger != nil {
+ t.Logger.Debug(
+ "ignored message send request; unknown remote peer target",
+ zap.String("type", m.Type.String()),
+ zap.String("unknown-target-peer-id", to.String()),
+ )
+ } else {
+ plog.Debugf("ignored message %s (sent to unknown peer %s)", m.Type, to)
+ }
+ }
+}
+
+func (t *Transport) Stop() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ for _, r := range t.remotes {
+ r.stop()
+ }
+ for _, p := range t.peers {
+ p.stop()
+ }
+ t.pipelineProber.RemoveAll()
+ t.streamProber.RemoveAll()
+ if tr, ok := t.streamRt.(*http.Transport); ok {
+ tr.CloseIdleConnections()
+ }
+ if tr, ok := t.pipelineRt.(*http.Transport); ok {
+ tr.CloseIdleConnections()
+ }
+ t.peers = nil
+ t.remotes = nil
+}
+
+// CutPeer drops messages to the specified peer.
+func (t *Transport) CutPeer(id types.ID) {
+ t.mu.RLock()
+ p, pok := t.peers[id]
+ g, gok := t.remotes[id]
+ t.mu.RUnlock()
+
+ if pok {
+ p.(Pausable).Pause()
+ }
+ if gok {
+ g.Pause()
+ }
+}
+
+// MendPeer recovers the message dropping behavior of the given peer.
+func (t *Transport) MendPeer(id types.ID) {
+ t.mu.RLock()
+ p, pok := t.peers[id]
+ g, gok := t.remotes[id]
+ t.mu.RUnlock()
+
+ if pok {
+ p.(Pausable).Resume()
+ }
+ if gok {
+ g.Resume()
+ }
+}
+
+func (t *Transport) AddRemote(id types.ID, us []string) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.remotes == nil {
+ // there's no clean way to shutdown the golang http server
+ // (see: https://github.com/golang/go/issues/4674) before
+ // stopping the transport; ignore any new connections.
+ return
+ }
+ if _, ok := t.peers[id]; ok {
+ return
+ }
+ if _, ok := t.remotes[id]; ok {
+ return
+ }
+ urls, err := types.NewURLs(us)
+ if err != nil {
+ if t.Logger != nil {
+ t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err))
+ } else {
+ plog.Panicf("newURLs %+v should never fail: %+v", us, err)
+ }
+ }
+ t.remotes[id] = startRemote(t, urls, id)
+
+ if t.Logger != nil {
+ t.Logger.Info(
+ "added new remote peer",
+ zap.String("local-member-id", t.ID.String()),
+ zap.String("remote-peer-id", id.String()),
+ zap.Strings("remote-peer-urls", us),
+ )
+ }
+}
+
+func (t *Transport) AddPeer(id types.ID, us []string) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ if t.peers == nil {
+ panic("transport stopped")
+ }
+ if _, ok := t.peers[id]; ok {
+ return
+ }
+ urls, err := types.NewURLs(us)
+ if err != nil {
+ if t.Logger != nil {
+ t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err))
+ } else {
+ plog.Panicf("newURLs %+v should never fail: %+v", us, err)
+ }
+ }
+ fs := t.LeaderStats.Follower(id.String())
+ t.peers[id] = startPeer(t, urls, id, fs)
+ addPeerToProber(t.Logger, t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rttSec)
+ addPeerToProber(t.Logger, t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rttSec)
+
+ if t.Logger != nil {
+ t.Logger.Info(
+ "added remote peer",
+ zap.String("local-member-id", t.ID.String()),
+ zap.String("remote-peer-id", id.String()),
+ zap.Strings("remote-peer-urls", us),
+ )
+ } else {
+ plog.Infof("added peer %s", id)
+ }
+}
+
+func (t *Transport) RemovePeer(id types.ID) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ t.removePeer(id)
+}
+
+func (t *Transport) RemoveAllPeers() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ for id := range t.peers {
+ t.removePeer(id)
+ }
+}
+
+// The caller of this function must hold the peers mutex.
+func (t *Transport) removePeer(id types.ID) {
+ if peer, ok := t.peers[id]; ok {
+ peer.stop()
+ } else {
+ if t.Logger != nil {
+ t.Logger.Panic("unexpected removal of unknown remote peer", zap.String("remote-peer-id", id.String()))
+ } else {
+ plog.Panicf("unexpected removal of unknown peer '%d'", id)
+ }
+ }
+ delete(t.peers, id)
+ delete(t.LeaderStats.Followers, id.String())
+ t.pipelineProber.Remove(id.String())
+ t.streamProber.Remove(id.String())
+
+ if t.Logger != nil {
+ t.Logger.Info(
+ "removed remote peer",
+ zap.String("local-member-id", t.ID.String()),
+ zap.String("removed-remote-peer-id", id.String()),
+ )
+ } else {
+ plog.Infof("removed peer %s", id)
+ }
+}
+
+func (t *Transport) UpdatePeer(id types.ID, us []string) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ // TODO: return error or just panic?
+ if _, ok := t.peers[id]; !ok {
+ return
+ }
+ urls, err := types.NewURLs(us)
+ if err != nil {
+ if t.Logger != nil {
+ t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err))
+ } else {
+ plog.Panicf("newURLs %+v should never fail: %+v", us, err)
+ }
+ }
+ t.peers[id].update(urls)
+
+ t.pipelineProber.Remove(id.String())
+ addPeerToProber(t.Logger, t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rttSec)
+ t.streamProber.Remove(id.String())
+ addPeerToProber(t.Logger, t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rttSec)
+
+ if t.Logger != nil {
+ t.Logger.Info(
+ "updated remote peer",
+ zap.String("local-member-id", t.ID.String()),
+ zap.String("updated-remote-peer-id", id.String()),
+ zap.Strings("updated-remote-peer-urls", us),
+ )
+ } else {
+ plog.Infof("updated peer %s", id)
+ }
+}
+
+func (t *Transport) ActiveSince(id types.ID) time.Time {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ if p, ok := t.peers[id]; ok {
+ return p.activeSince()
+ }
+ return time.Time{}
+}
+
+func (t *Transport) SendSnapshot(m snap.Message) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ p := t.peers[types.ID(m.To)]
+ if p == nil {
+ m.CloseWithError(errMemberNotFound)
+ return
+ }
+ p.sendSnap(m)
+}
+
+// Pausable is a testing interface for pausing transport traffic.
+type Pausable interface {
+ Pause()
+ Resume()
+}
+
+func (t *Transport) Pause() {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ for _, p := range t.peers {
+ p.(Pausable).Pause()
+ }
+}
+
+func (t *Transport) Resume() {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ for _, p := range t.peers {
+ p.(Pausable).Resume()
+ }
+}
+
+// ActivePeers returns the number of peers whose connection is
+// currently active, i.e. whose activeSince time is non-zero.
+func (t *Transport) ActivePeers() (cnt int) {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ for _, p := range t.peers {
+ if !p.activeSince().IsZero() {
+ cnt++
+ }
+ }
+ return cnt
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/urlpick.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/urlpick.go
new file mode 100644
index 000000000000..61ef468649a5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/urlpick.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "net/url"
+ "sync"
+
+ "go.etcd.io/etcd/pkg/types"
+)
+
+type urlPicker struct {
+ mu sync.Mutex // guards urls and picked
+ urls types.URLs
+ picked int
+}
+
+func newURLPicker(urls types.URLs) *urlPicker {
+ return &urlPicker{
+ urls: urls,
+ }
+}
+
+func (p *urlPicker) update(urls types.URLs) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.urls = urls
+ p.picked = 0
+}
+
+func (p *urlPicker) pick() url.URL {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ return p.urls[p.picked]
+}
+
+// unreachable notifies the picker that the given url is unreachable,
+// so that it picks from the other possible urls.
+func (p *urlPicker) unreachable(u url.URL) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
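+ // Advance only if the reported url is still the picked one, so that
+ // concurrent failure reports do not skip past untried urls.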
+ if u == p.urls[p.picked] {
+ p.picked = (p.picked + 1) % len(p.urls)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/util.go b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/util.go
new file mode 100644
index 000000000000..20938647c7a6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/rafthttp/util.go
@@ -0,0 +1,190 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/pkg/transport"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/version"
+
+ "github.com/coreos/go-semver/semver"
+)
+
+var (
+ errMemberRemoved = fmt.Errorf("the member has been permanently removed from the cluster")
+ errMemberNotFound = fmt.Errorf("member not found")
+)
+
+// NewListener returns a listener for raft message transfer between peers.
+// It uses a timeout listener to identify broken streams promptly.
+func NewListener(u url.URL, tlsinfo *transport.TLSInfo) (net.Listener, error) {
+ return transport.NewTimeoutListener(u.Host, u.Scheme, tlsinfo, ConnReadTimeout, ConnWriteTimeout)
+}
+
+// NewRoundTripper returns a roundTripper used to send requests
+// to the rafthttp listener of remote peers.
+func NewRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) {
+ // It uses a timeout transport to pair with remote timeout listeners.
+ // It sets no read/write timeout, because messages in requests may
+ // take a long time to write out before the response is read.
+ return transport.NewTimeoutTransport(tlsInfo, dialTimeout, 0, 0)
+}
+
+// newStreamRoundTripper returns a roundTripper used to send stream requests
+// to the rafthttp listener of remote peers.
+// Read/write timeouts are set for the stream roundTripper to promptly
+// detect a broken connection, which minimizes the number of messages
+// sent on the broken connection.
+func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) {
+ return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout)
+}
+
+// createPostRequest creates an HTTP POST request for sending a raft message.
+func createPostRequest(u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request {
+ uu := u
+ uu.Path = path
+ req, err := http.NewRequest("POST", uu.String(), body)
+ if err != nil {
+ plog.Panicf("unexpected new request error (%v)", err)
+ }
+ req.Header.Set("Content-Type", ct)
+ req.Header.Set("X-Server-From", from.String())
+ req.Header.Set("X-Server-Version", version.Version)
+ req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion)
+ req.Header.Set("X-Etcd-Cluster-ID", cid.String())
+ setPeerURLsHeader(req, urls)
+
+ return req
+}
+
+// checkPostResponse checks the response of the HTTP POST request that sends
+// raft message.
+func checkPostResponse(resp *http.Response, body []byte, req *http.Request, to types.ID) error {
+ switch resp.StatusCode {
+ case http.StatusPreconditionFailed:
+ switch strings.TrimSuffix(string(body), "\n") {
+ case errIncompatibleVersion.Error():
+ plog.Errorf("request sent was ignored by peer %s (server version incompatible)", to)
+ return errIncompatibleVersion
+ case errClusterIDMismatch.Error():
+ plog.Errorf("request sent was ignored (cluster ID mismatch: remote[%s]=%s, local=%s)",
+ to, resp.Header.Get("X-Etcd-Cluster-ID"), req.Header.Get("X-Etcd-Cluster-ID"))
+ return errClusterIDMismatch
+ default:
+ return fmt.Errorf("unhandled error %q when precondition failed", string(body))
+ }
+ case http.StatusForbidden:
+ return errMemberRemoved
+ case http.StatusNoContent:
+ return nil
+ default:
+ return fmt.Errorf("unexpected http status %s while posting to %q", http.StatusText(resp.StatusCode), req.URL.String())
+ }
+}
+
+// reportCriticalError reports the given error by sending it into
+// the given error channel.
+// If the error channel is already full, the error is dropped, since the
+// fact that an error has happened has already been reported, which is
+// good enough.
+func reportCriticalError(err error, errc chan<- error) {
+ select {
+ case errc <- err:
+ default:
+ }
+}
+
+// compareMajorMinorVersion returns an integer comparing two versions based on
+// their major and minor version. The result will be 0 if a==b, -1 if a < b,
+// and 1 if a > b.
+func compareMajorMinorVersion(a, b *semver.Version) int {
+ na := &semver.Version{Major: a.Major, Minor: a.Minor}
+ nb := &semver.Version{Major: b.Major, Minor: b.Minor}
+ switch {
+ case na.LessThan(*nb):
+ return -1
+ case nb.LessThan(*na):
+ return 1
+ default:
+ return 0
+ }
+}
+
+// serverVersion returns the server version from the given header.
+func serverVersion(h http.Header) *semver.Version {
+ verStr := h.Get("X-Server-Version")
+ // backward compatibility with etcd 2.0
+ if verStr == "" {
+ verStr = "2.0.0"
+ }
+ return semver.Must(semver.NewVersion(verStr))
+}
+
+// minClusterVersion returns the min cluster version from the given header.
+func minClusterVersion(h http.Header) *semver.Version {
+ verStr := h.Get("X-Min-Cluster-Version")
+ // backward compatibility with etcd 2.0
+ if verStr == "" {
+ verStr = "2.0.0"
+ }
+ return semver.Must(semver.NewVersion(verStr))
+}
+
+// checkVersionCompatibility checks whether the given version is compatible
+// with the local version.
+func checkVersionCompatibility(name string, server, minCluster *semver.Version) (
+ localServer *semver.Version,
+ localMinCluster *semver.Version,
+ err error) {
+ localServer = semver.Must(semver.NewVersion(version.Version))
+ localMinCluster = semver.Must(semver.NewVersion(version.MinClusterVersion))
+ if compareMajorMinorVersion(server, localMinCluster) == -1 {
+ return localServer, localMinCluster, fmt.Errorf("remote version is too low: remote[%s]=%s, local=%s", name, server, localServer)
+ }
+ if compareMajorMinorVersion(minCluster, localServer) == 1 {
+ return localServer, localMinCluster, fmt.Errorf("local version is too low: remote[%s]=%s, local=%s", name, server, localServer)
+ }
+ return localServer, localMinCluster, nil
+}
+
+// setPeerURLsHeader reports local urls for peer discovery
+func setPeerURLsHeader(req *http.Request, urls types.URLs) {
+ if urls == nil {
+ // often not set in unit tests
+ return
+ }
+ peerURLs := make([]string, urls.Len())
+ for i := range urls {
+ peerURLs[i] = urls[i].String()
+ }
+ req.Header.Set("X-PeerURLs", strings.Join(peerURLs, ","))
+}
+
+// addRemoteFromRequest adds a remote peer according to an http request header
+func addRemoteFromRequest(tr Transporter, r *http.Request) {
+ if from, err := types.IDFromString(r.Header.Get("X-Server-From")); err == nil {
+ if urls := r.Header.Get("X-PeerURLs"); urls != "" {
+ tr.AddRemote(from, strings.Split(urls, ","))
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/snap/db.go b/vendor/go.etcd.io/etcd/etcdserver/api/snap/db.go
new file mode 100644
index 000000000000..3002ccdccea3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/snap/db.go
@@ -0,0 +1,104 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "time"
+
+ "go.etcd.io/etcd/pkg/fileutil"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+)
+
+var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist")
+
+// SaveDBFrom saves a snapshot of the database from the given reader. It
+// guarantees the save operation is atomic.
+func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) {
+ start := time.Now()
+
+ f, err := ioutil.TempFile(s.dir, "tmp")
+ if err != nil {
+ return 0, err
+ }
+ var n int64
+ n, err = io.Copy(f, r)
+ if err == nil {
+ fsyncStart := time.Now()
+ err = fileutil.Fsync(f)
+ snapDBFsyncSec.Observe(time.Since(fsyncStart).Seconds())
+ }
+ f.Close()
+ if err != nil {
+ os.Remove(f.Name())
+ return n, err
+ }
+ fn := s.dbFilePath(id)
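+ // A snapshot with this index already exists on disk; keep it and
+ // discard the temporary file.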
+ if fileutil.Exist(fn) {
+ os.Remove(f.Name())
+ return n, nil
+ }
+ err = os.Rename(f.Name(), fn)
+ if err != nil {
+ os.Remove(f.Name())
+ return n, err
+ }
+
+ if s.lg != nil {
+ s.lg.Info(
+ "saved database snapshot to disk",
+ zap.String("path", fn),
+ zap.Int64("bytes", n),
+ zap.String("size", humanize.Bytes(uint64(n))),
+ )
+ } else {
+ plog.Infof("saved database snapshot to disk [total bytes: %d]", n)
+ }
+
+ snapDBSaveSec.Observe(time.Since(start).Seconds())
+ return n, nil
+}
+
+// DBFilePath returns the file path for the snapshot of the database with
+// the given id. If the snapshot does not exist, it returns an error.
+func (s *Snapshotter) DBFilePath(id uint64) (string, error) {
+ if _, err := fileutil.ReadDir(s.dir); err != nil {
+ return "", err
+ }
+ fn := s.dbFilePath(id)
+ if fileutil.Exist(fn) {
+ return fn, nil
+ }
+ if s.lg != nil {
+ s.lg.Warn(
+ "failed to find [SNAPSHOT-INDEX].snap.db",
+ zap.Uint64("snapshot-index", id),
+ zap.String("snapshot-file-path", fn),
+ zap.Error(ErrNoDBSnapshot),
+ )
+ }
+ return "", ErrNoDBSnapshot
+}
+
+func (s *Snapshotter) dbFilePath(id uint64) string {
+ return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id))
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/snap/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/snap/doc.go
new file mode 100644
index 000000000000..dcc5db579822
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/snap/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package snap handles Raft nodes' states with snapshots.
+// The snapshot logic is internal to etcd server and raft package.
+package snap
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/snap/message.go b/vendor/go.etcd.io/etcd/etcdserver/api/snap/message.go
new file mode 100644
index 000000000000..c1151e27eb13
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/snap/message.go
@@ -0,0 +1,64 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+ "io"
+
+ "go.etcd.io/etcd/pkg/ioutil"
+ "go.etcd.io/etcd/raft/raftpb"
+)
+
+// Message is a struct that contains a raft Message and a ReadCloser. The type
+// of raft message MUST be MsgSnap, which contains the raft meta-data and an
+// additional data []byte field that contains the snapshot of the actual state
+// machine.
+// Message contains the ReadCloser field for handling large snapshots. This avoids
+// copying the entire snapshot into a byte array, which consumes a lot of memory.
+//
+// User of Message should close the Message after sending it.
+type Message struct {
+ raftpb.Message
+ ReadCloser io.ReadCloser
+ TotalSize int64
+ closeC chan bool
+}
+
+func NewMessage(rs raftpb.Message, rc io.ReadCloser, rcSize int64) *Message {
+ return &Message{
+ Message: rs,
+ ReadCloser: ioutil.NewExactReadCloser(rc, rcSize),
+ TotalSize: int64(rs.Size()) + rcSize,
+ closeC: make(chan bool, 1),
+ }
+}
+
+// CloseNotify returns a channel that receives a single value
+// when the message send is finished. true indicates the send
+// was successful.
+func (m Message) CloseNotify() <-chan bool {
+ return m.closeC
+}
+
+func (m Message) CloseWithError(err error) {
+ if cerr := m.ReadCloser.Close(); cerr != nil {
+ err = cerr
+ }
+ if err == nil {
+ m.closeC <- true
+ } else {
+ m.closeC <- false
+ }
+}
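
Illustration (not part of the vendored file): the intended lifecycle is that a transport goroutine drains ReadCloser and reports the outcome via CloseWithError, while the producer waits on CloseNotify. A hedged sketch; sendSnapshot and the in-memory buffer standing in for a peer connection are ours:

package main

import (
	"bytes"
	"io"
	"io/ioutil"

	"go.etcd.io/etcd/etcdserver/api/snap"
	"go.etcd.io/etcd/raft/raftpb"
)

// sendSnapshot streams the snapshot body and signals completion; a real
// transport would write to a peer connection instead of a local buffer.
func sendSnapshot(w io.Writer, m *snap.Message) {
	_, err := io.Copy(w, m.ReadCloser)
	m.CloseWithError(err) // closes ReadCloser and delivers true/false on closeC
}

func main() {
	body := []byte("state machine snapshot bytes")
	m := snap.NewMessage(
		raftpb.Message{Type: raftpb.MsgSnap},
		ioutil.NopCloser(bytes.NewReader(body)),
		int64(len(body)),
	)
	var conn bytes.Buffer
	go sendSnapshot(&conn, m)
	<-m.CloseNotify() // true once the send finished successfully
}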
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/snap/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/snap/metrics.go
new file mode 100644
index 000000000000..2affecf47269
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/snap/metrics.go
@@ -0,0 +1,82 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ snapMarshallingSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "snap",
+ Name: "save_marshalling_duration_seconds",
+ Help: "The marshalling cost distributions of save called by snapshot.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ snapSaveSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "snap",
+ Name: "save_total_duration_seconds",
+ Help: "The total latency distributions of save called by snapshot.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ snapFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "snap",
+ Name: "fsync_duration_seconds",
+ Help: "The latency distributions of fsync called by snap.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ snapDBSaveSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "snap_db",
+ Name: "save_total_duration_seconds",
+ Help: "The total latency distributions of v3 snapshot save",
+
+ // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+ // highest bucket start of 0.1 sec * 2^9 == 51.2 sec
+ Buckets: prometheus.ExponentialBuckets(0.1, 2, 10),
+ })
+
+ snapDBFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "snap_db",
+ Name: "fsync_duration_seconds",
+ Help: "The latency distributions of fsyncing .snap.db file",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+)
+
+func init() {
+ prometheus.MustRegister(snapMarshallingSec)
+ prometheus.MustRegister(snapSaveSec)
+ prometheus.MustRegister(snapFsyncSec)
+ prometheus.MustRegister(snapDBSaveSec)
+ prometheus.MustRegister(snapDBFsyncSec)
+}
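
Aside (not part of the vendored file): the bucket comments above can be sanity-checked directly, since prometheus.ExponentialBuckets(start, factor, count) returns count upper bounds start, start*factor, ..., start*factor^(count-1):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	b := prometheus.ExponentialBuckets(0.001, 2, 14)
	fmt.Println(b[0], b[len(b)-1]) // 0.001 8.192, i.e. 0.001 * 2^13
}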
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/snap/snappb/snap.pb.go b/vendor/go.etcd.io/etcd/etcdserver/api/snap/snappb/snap.pb.go
new file mode 100644
index 000000000000..e72b577f5b8d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/snap/snappb/snap.pb.go
@@ -0,0 +1,336 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: snap.proto
+
+/*
+ Package snappb is a generated protocol buffer package.
+
+ It is generated from these files:
+ snap.proto
+
+ It has these top-level messages:
+ Snapshot
+*/
+package snappb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Snapshot struct {
+ Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"`
+ Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnap, []int{0} }
+
+func init() {
+ proto.RegisterType((*Snapshot)(nil), "snappb.snapshot")
+}
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintSnap(dAtA, i, uint64(m.Crc))
+ if m.Data != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintSnap(dAtA, i, uint64(len(m.Data)))
+ i += copy(dAtA[i:], m.Data)
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func encodeVarintSnap(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *Snapshot) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovSnap(uint64(m.Crc))
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovSnap(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovSnap(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozSnap(x uint64) (n int) {
+ return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: snapshot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType)
+ }
+ m.Crc = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Crc |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSnap
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSnap(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthSnap
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipSnap(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthSnap
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipSnap(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("snap.proto", fileDescriptorSnap) }
+
+var fileDescriptorSnap = []byte{
+ // 126 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c,
+ 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3,
+ 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c,
+ 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb,
+ 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24,
+ 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1,
+ 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e,
+ 0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/snap/snappb/snap.proto b/vendor/go.etcd.io/etcd/etcdserver/api/snap/snappb/snap.proto
new file mode 100644
index 000000000000..cd3d21d0ee12
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/snap/snappb/snap.proto
@@ -0,0 +1,14 @@
+syntax = "proto2";
+package snappb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message snapshot {
+ optional uint32 crc = 1 [(gogoproto.nullable) = false];
+ optional bytes data = 2;
+}
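
Aside (not part of the vendored file): because crc is marked (gogoproto.nullable) = false, the generated Go field is a plain uint32 rather than a pointer, so every frame carries a checksum. A small sketch of the decode-plus-verify side, mirroring what the snapshotter's Read does below; the package and checkFrame are ours:

package snapcheck

import (
	"errors"
	"hash/crc32"

	"go.etcd.io/etcd/etcdserver/api/snap/snappb"
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

// checkFrame unmarshals a snappb.Snapshot frame and returns the payload only
// if the stored CRC matches a Castagnoli CRC recomputed over the data.
func checkFrame(b []byte) ([]byte, error) {
	var s snappb.Snapshot
	if err := s.Unmarshal(b); err != nil {
		return nil, err
	}
	if crc32.Update(0, crcTable, s.Data) != s.Crc {
		return nil, errors.New("snap: crc mismatch")
	}
	return s.Data, nil
}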
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/snap/snapshotter.go b/vendor/go.etcd.io/etcd/etcdserver/api/snap/snapshotter.go
new file mode 100644
index 000000000000..c5d6d6183c1b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/snap/snapshotter.go
@@ -0,0 +1,337 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/coreos/pkg/capnslog"
+ "go.etcd.io/etcd/etcdserver/api/snap/snappb"
+ pioutil "go.etcd.io/etcd/pkg/ioutil"
+ "go.etcd.io/etcd/pkg/pbutil"
+ "go.etcd.io/etcd/raft"
+ "go.etcd.io/etcd/raft/raftpb"
+ "go.etcd.io/etcd/wal/walpb"
+ "go.uber.org/zap"
+)
+
+const snapSuffix = ".snap"
+
+var (
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd/v3", "snap")
+
+ ErrNoSnapshot = errors.New("snap: no available snapshot")
+ ErrEmptySnapshot = errors.New("snap: empty snapshot")
+ ErrCRCMismatch = errors.New("snap: crc mismatch")
+ crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+ // A map of valid files that can be present in the snap folder.
+ validFiles = map[string]bool{
+ "db": true,
+ }
+)
+
+type Snapshotter struct {
+ lg *zap.Logger
+ dir string
+}
+
+func New(lg *zap.Logger, dir string) *Snapshotter {
+ return &Snapshotter{
+ lg: lg,
+ dir: dir,
+ }
+}
+
+func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error {
+ if raft.IsEmptySnap(snapshot) {
+ return nil
+ }
+ return s.save(&snapshot)
+}
+
+func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error {
+ start := time.Now()
+
+ fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix)
+ b := pbutil.MustMarshal(snapshot)
+ crc := crc32.Update(0, crcTable, b)
+ snap := snappb.Snapshot{Crc: crc, Data: b}
+ d, err := snap.Marshal()
+ if err != nil {
+ return err
+ }
+ snapMarshallingSec.Observe(time.Since(start).Seconds())
+
+ spath := filepath.Join(s.dir, fname)
+
+ fsyncStart := time.Now()
+ err = pioutil.WriteAndSyncFile(spath, d, 0666)
+ snapFsyncSec.Observe(time.Since(fsyncStart).Seconds())
+
+ if err != nil {
+ if s.lg != nil {
+ s.lg.Warn("failed to write a snap file", zap.String("path", spath), zap.Error(err))
+ }
+ rerr := os.Remove(spath)
+ if rerr != nil {
+ if s.lg != nil {
+ s.lg.Warn("failed to remove a broken snap file", zap.String("path", spath), zap.Error(err))
+ } else {
+ plog.Errorf("failed to remove broken snapshot file %s", spath)
+ }
+ }
+ return err
+ }
+
+ snapSaveSec.Observe(time.Since(start).Seconds())
+ return nil
+}
+
+// Load returns the newest snapshot.
+func (s *Snapshotter) Load() (*raftpb.Snapshot, error) {
+ return s.loadMatching(func(*raftpb.Snapshot) bool { return true })
+}
+
+// LoadNewestAvailable loads the newest snapshot available that is in walSnaps.
+func (s *Snapshotter) LoadNewestAvailable(walSnaps []walpb.Snapshot) (*raftpb.Snapshot, error) {
+ return s.loadMatching(func(snapshot *raftpb.Snapshot) bool {
+ m := snapshot.Metadata
+ for i := len(walSnaps) - 1; i >= 0; i-- {
+ if m.Term == walSnaps[i].Term && m.Index == walSnaps[i].Index {
+ return true
+ }
+ }
+ return false
+ })
+}
+
+// loadMatching returns the newest snapshot where matchFn returns true.
+func (s *Snapshotter) loadMatching(matchFn func(*raftpb.Snapshot) bool) (*raftpb.Snapshot, error) {
+ names, err := s.snapNames()
+ if err != nil {
+ return nil, err
+ }
+ var snap *raftpb.Snapshot
+ for _, name := range names {
+ if snap, err = loadSnap(s.lg, s.dir, name); err == nil && matchFn(snap) {
+ return snap, nil
+ }
+ }
+ return nil, ErrNoSnapshot
+}
+
+func loadSnap(lg *zap.Logger, dir, name string) (*raftpb.Snapshot, error) {
+ fpath := filepath.Join(dir, name)
+ snap, err := Read(lg, fpath)
+ if err != nil {
+ brokenPath := fpath + ".broken"
+ if lg != nil {
+ lg.Warn("failed to read a snap file", zap.String("path", fpath), zap.Error(err))
+ }
+ if rerr := os.Rename(fpath, brokenPath); rerr != nil {
+ if lg != nil {
+ lg.Warn("failed to rename a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath), zap.Error(rerr))
+ } else {
+ plog.Warningf("cannot rename broken snapshot file %v to %v: %v", fpath, brokenPath, rerr)
+ }
+ } else {
+ if lg != nil {
+ lg.Warn("renamed to a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath))
+ }
+ }
+ }
+ return snap, err
+}
+
+// Read reads the snapshot named by snapname and returns the snapshot.
+func Read(lg *zap.Logger, snapname string) (*raftpb.Snapshot, error) {
+ b, err := ioutil.ReadFile(snapname)
+ if err != nil {
+ if lg != nil {
+ lg.Warn("failed to read a snap file", zap.String("path", snapname), zap.Error(err))
+ } else {
+ plog.Errorf("cannot read file %v: %v", snapname, err)
+ }
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ if lg != nil {
+ lg.Warn("failed to read empty snapshot file", zap.String("path", snapname))
+ } else {
+ plog.Errorf("unexpected empty snapshot")
+ }
+ return nil, ErrEmptySnapshot
+ }
+
+ var serializedSnap snappb.Snapshot
+ if err = serializedSnap.Unmarshal(b); err != nil {
+ if lg != nil {
+ lg.Warn("failed to unmarshal snappb.Snapshot", zap.String("path", snapname), zap.Error(err))
+ } else {
+ plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
+ }
+ return nil, err
+ }
+
+ if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 {
+ if lg != nil {
+ lg.Warn("failed to read empty snapshot data", zap.String("path", snapname))
+ } else {
+ plog.Errorf("unexpected empty snapshot")
+ }
+ return nil, ErrEmptySnapshot
+ }
+
+ crc := crc32.Update(0, crcTable, serializedSnap.Data)
+ if crc != serializedSnap.Crc {
+ if lg != nil {
+ lg.Warn("snap file is corrupt",
+ zap.String("path", snapname),
+ zap.Uint32("prev-crc", serializedSnap.Crc),
+ zap.Uint32("new-crc", crc),
+ )
+ } else {
+ plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname)
+ }
+ return nil, ErrCRCMismatch
+ }
+
+ var snap raftpb.Snapshot
+ if err = snap.Unmarshal(serializedSnap.Data); err != nil {
+ if lg != nil {
+ lg.Warn("failed to unmarshal raftpb.Snapshot", zap.String("path", snapname), zap.Error(err))
+ } else {
+ plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
+ }
+ return nil, err
+ }
+ return &snap, nil
+}
+
+// snapNames returns the filenames of the snapshots in logical time order (from newest to oldest).
+// If there are no available snapshots, ErrNoSnapshot will be returned.
+func (s *Snapshotter) snapNames() ([]string, error) {
+ dir, err := os.Open(s.dir)
+ if err != nil {
+ return nil, err
+ }
+ defer dir.Close()
+ names, err := dir.Readdirnames(-1)
+ if err != nil {
+ return nil, err
+ }
+ names, err = s.cleanupSnapdir(names)
+ if err != nil {
+ return nil, err
+ }
+ snaps := checkSuffix(s.lg, names)
+ if len(snaps) == 0 {
+ return nil, ErrNoSnapshot
+ }
+ sort.Sort(sort.Reverse(sort.StringSlice(snaps)))
+ return snaps, nil
+}
+
+func checkSuffix(lg *zap.Logger, names []string) []string {
+ snaps := []string{}
+ for i := range names {
+ if strings.HasSuffix(names[i], snapSuffix) {
+ snaps = append(snaps, names[i])
+ } else {
+ // If we find a file which is not a snapshot then check if it's
+ // a valid file. If not, log a warning.
+ if _, ok := validFiles[names[i]]; !ok {
+ if lg != nil {
+ lg.Warn("found unexpected non-snap file; skipping", zap.String("path", names[i]))
+ } else {
+ plog.Warningf("skipped unexpected non snapshot file %v", names[i])
+ }
+ }
+ }
+ }
+ return snaps
+}
+
+// cleanupSnapdir removes any files that should not be in the snapshot directory:
+// - db.tmp prefixed files that can be orphaned by defragmentation
+func (s *Snapshotter) cleanupSnapdir(filenames []string) (names []string, err error) {
+ for _, filename := range filenames {
+ if strings.HasPrefix(filename, "db.tmp") {
+ if s.lg != nil {
+ s.lg.Info("found orphaned defragmentation file; deleting", zap.String("path", filename))
+ } else {
+ plog.Infof("found orphaned defragmentation file; deleting: %s", filename)
+ }
+ if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
+ return nil, fmt.Errorf("failed to remove orphaned defragmentation file %s: %v", filename, rmErr)
+ }
+ continue
+ }
+ names = append(names, filename)
+ }
+ return names, nil
+}
+
+func (s *Snapshotter) ReleaseSnapDBs(snap raftpb.Snapshot) error {
+ dir, err := os.Open(s.dir)
+ if err != nil {
+ return err
+ }
+ defer dir.Close()
+ filenames, err := dir.Readdirnames(-1)
+ if err != nil {
+ return err
+ }
+ for _, filename := range filenames {
+ if strings.HasSuffix(filename, ".snap.db") {
+ hexIndex := strings.TrimSuffix(filepath.Base(filename), ".snap.db")
+ index, err := strconv.ParseUint(hexIndex, 16, 64)
+ if err != nil {
+ if s.lg != nil {
+ s.lg.Warn("failed to parse index from filename", zap.String("path", filename), zap.String("error", err.Error()))
+ } else {
+ plog.Warningf("failed to parse index from filename: %s (%v)", filename, err)
+ }
+ continue
+ }
+ if index < snap.Metadata.Index {
+ if s.lg != nil {
+ s.lg.Warn("found orphaned .snap.db file; deleting", zap.String("path", filename))
+ } else {
+ plog.Warningf("found orphaned .snap.db file; deleting: %s", filename)
+ }
+ if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
+ if s.lg != nil {
+ s.lg.Warn("failed to remove orphaned .snap.db file", zap.String("path", filename), zap.Error(rmErr))
+ } else {
+ plog.Warningf("failed to remove orphaned .snap.db file: %s (%v)", filename, rmErr)
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
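
Illustration (not part of the vendored file): a minimal save/load round trip with the Snapshotter above; the directory path and example metadata are ours, and the directory must already exist:

package main

import (
	"fmt"

	"go.etcd.io/etcd/etcdserver/api/snap"
	"go.etcd.io/etcd/raft/raftpb"
	"go.uber.org/zap"
)

func main() {
	ss := snap.New(zap.NewExample(), "/tmp/snapdir")
	in := raftpb.Snapshot{
		Data:     []byte("state machine bytes"),
		Metadata: raftpb.SnapshotMetadata{Index: 5, Term: 2},
	}
	// Writes 0000000000000002-0000000000000005.snap (term-index naming).
	if err := ss.SaveSnap(in); err != nil {
		panic(err)
	}
	out, err := ss.Load() // newest snapshot, CRC-verified by Read
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Metadata.Index, out.Metadata.Term) // 5 2
}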
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2auth/auth.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2auth/auth.go
new file mode 100644
index 000000000000..b438074a4498
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2auth/auth.go
@@ -0,0 +1,736 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2auth implements etcd authentication.
+package v2auth
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "path"
+ "reflect"
+ "sort"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api/v2error"
+ "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/pkg/types"
+
+ "github.com/coreos/pkg/capnslog"
+ "go.uber.org/zap"
+ "golang.org/x/crypto/bcrypt"
+)
+
+const (
+ // StorePermsPrefix is the internal prefix of the storage layer dedicated to storing user data.
+ StorePermsPrefix = "/2"
+
+ // RootRoleName is the name of the ROOT role, with privileges to manage the cluster.
+ RootRoleName = "root"
+
+ // GuestRoleName is the name of the role that defines the privileges of an unauthenticated user.
+ GuestRoleName = "guest"
+)
+
+var (
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd/v3", "etcdserver/auth")
+)
+
+var rootRole = Role{
+ Role: RootRoleName,
+ Permissions: Permissions{
+ KV: RWPermission{
+ Read: []string{"/*"},
+ Write: []string{"/*"},
+ },
+ },
+}
+
+var guestRole = Role{
+ Role: GuestRoleName,
+ Permissions: Permissions{
+ KV: RWPermission{
+ Read: []string{"/*"},
+ Write: []string{"/*"},
+ },
+ },
+}
+
+type doer interface {
+ Do(context.Context, etcdserverpb.Request) (etcdserver.Response, error)
+}
+
+type Store interface {
+ AllUsers() ([]string, error)
+ GetUser(name string) (User, error)
+ CreateOrUpdateUser(user User) (out User, created bool, err error)
+ CreateUser(user User) (User, error)
+ DeleteUser(name string) error
+ UpdateUser(user User) (User, error)
+ AllRoles() ([]string, error)
+ GetRole(name string) (Role, error)
+ CreateRole(role Role) error
+ DeleteRole(name string) error
+ UpdateRole(role Role) (Role, error)
+ AuthEnabled() bool
+ EnableAuth() error
+ DisableAuth() error
+ PasswordStore
+}
+
+type PasswordStore interface {
+ CheckPassword(user User, password string) bool
+ HashPassword(password string) (string, error)
+}
+
+type store struct {
+ lg *zap.Logger
+ server doer
+ timeout time.Duration
+ ensuredOnce bool
+
+ PasswordStore
+}
+
+type User struct {
+ User string `json:"user"`
+ Password string `json:"password,omitempty"`
+ Roles []string `json:"roles"`
+ Grant []string `json:"grant,omitempty"`
+ Revoke []string `json:"revoke,omitempty"`
+}
+
+type Role struct {
+ Role string `json:"role"`
+ Permissions Permissions `json:"permissions"`
+ Grant *Permissions `json:"grant,omitempty"`
+ Revoke *Permissions `json:"revoke,omitempty"`
+}
+
+type Permissions struct {
+ KV RWPermission `json:"kv"`
+}
+
+func (p *Permissions) IsEmpty() bool {
+ return p == nil || (len(p.KV.Read) == 0 && len(p.KV.Write) == 0)
+}
+
+type RWPermission struct {
+ Read []string `json:"read"`
+ Write []string `json:"write"`
+}
+
+type Error struct {
+ Status int
+ Errmsg string
+}
+
+func (ae Error) Error() string { return ae.Errmsg }
+func (ae Error) HTTPStatus() int { return ae.Status }
+
+func authErr(hs int, s string, v ...interface{}) Error {
+ return Error{Status: hs, Errmsg: fmt.Sprintf("auth: "+s, v...)}
+}
+
+func NewStore(lg *zap.Logger, server doer, timeout time.Duration) Store {
+ s := &store{
+ lg: lg,
+ server: server,
+ timeout: timeout,
+ PasswordStore: passwordStore{},
+ }
+ return s
+}
+
+// passwordStore implements PasswordStore using bcrypt to hash user passwords
+type passwordStore struct{}
+
+func (passwordStore) CheckPassword(user User, password string) bool {
+ err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password))
+ return err == nil
+}
+
+func (passwordStore) HashPassword(password string) (string, error) {
+ hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
+ return string(hash), err
+}
+
+func (s *store) AllUsers() ([]string, error) {
+ resp, err := s.requestResource("/users/", false)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return []string{}, nil
+ }
+ }
+ return nil, err
+ }
+ var nodes []string
+ for _, n := range resp.Event.Node.Nodes {
+ _, user := path.Split(n.Key)
+ nodes = append(nodes, user)
+ }
+ sort.Strings(nodes)
+ return nodes, nil
+}
+
+func (s *store) GetUser(name string) (User, error) { return s.getUser(name, false) }
+
+// CreateOrUpdateUser should only be used for creating a new user or when you are
+// not sure whether it is a create or an update. (When only a password is passed
+// in, we cannot tell whether it is an update or a create.)
+func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) {
+ _, err = s.getUser(user.User, true)
+ if err == nil {
+ out, err = s.UpdateUser(user)
+ return out, false, err
+ }
+ u, err := s.CreateUser(user)
+ return u, true, err
+}
+
+func (s *store) CreateUser(user User) (User, error) {
+ // Attach root role to root user.
+ if user.User == "root" {
+ user = attachRootRole(user)
+ }
+ u, err := s.createUserInternal(user)
+ if err == nil {
+ if s.lg != nil {
+ s.lg.Info("created a user", zap.String("user-name", user.User))
+ } else {
+ plog.Noticef("created user %s", user.User)
+ }
+ }
+ return u, err
+}
+
+func (s *store) createUserInternal(user User) (User, error) {
+ if user.Password == "" {
+ return user, authErr(http.StatusBadRequest, "Cannot create user %s with an empty password", user.User)
+ }
+ hash, err := s.HashPassword(user.Password)
+ if err != nil {
+ return user, err
+ }
+ user.Password = hash
+
+ _, err = s.createResource("/users/"+user.User, user)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeNodeExist {
+ return user, authErr(http.StatusConflict, "User %s already exists.", user.User)
+ }
+ }
+ }
+ return user, err
+}
+
+func (s *store) DeleteUser(name string) error {
+ if s.AuthEnabled() && name == "root" {
+ return authErr(http.StatusForbidden, "Cannot delete root user while auth is enabled.")
+ }
+ err := s.deleteResource("/users/" + name)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return authErr(http.StatusNotFound, "User %s does not exist", name)
+ }
+ }
+ return err
+ }
+ if s.lg != nil {
+ s.lg.Info("deleted a user", zap.String("user-name", name))
+ } else {
+ plog.Noticef("deleted user %s", name)
+ }
+ return nil
+}
+
+func (s *store) UpdateUser(user User) (User, error) {
+ old, err := s.getUser(user.User, true)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return user, authErr(http.StatusNotFound, "User %s doesn't exist.", user.User)
+ }
+ }
+ return old, err
+ }
+
+ newUser, err := old.merge(s.lg, user, s.PasswordStore)
+ if err != nil {
+ return old, err
+ }
+ if reflect.DeepEqual(old, newUser) {
+ return old, authErr(http.StatusBadRequest, "User not updated. Use grant/revoke/password to update the user.")
+ }
+ _, err = s.updateResource("/users/"+user.User, newUser)
+ if err == nil {
+ if s.lg != nil {
+ s.lg.Info("updated a user", zap.String("user-name", user.User))
+ } else {
+ plog.Noticef("updated user %s", user.User)
+ }
+ }
+ return newUser, err
+}
+
+func (s *store) AllRoles() ([]string, error) {
+ nodes := []string{RootRoleName}
+ resp, err := s.requestResource("/roles/", false)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return nodes, nil
+ }
+ }
+ return nil, err
+ }
+ for _, n := range resp.Event.Node.Nodes {
+ _, role := path.Split(n.Key)
+ nodes = append(nodes, role)
+ }
+ sort.Strings(nodes)
+ return nodes, nil
+}
+
+func (s *store) GetRole(name string) (Role, error) { return s.getRole(name, false) }
+
+func (s *store) CreateRole(role Role) error {
+ if role.Role == RootRoleName {
+ return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
+ }
+ _, err := s.createResource("/roles/"+role.Role, role)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeNodeExist {
+ return authErr(http.StatusConflict, "Role %s already exists.", role.Role)
+ }
+ }
+ }
+ if err == nil {
+ if s.lg != nil {
+ s.lg.Info("created a new role", zap.String("role-name", role.Role))
+ } else {
+ plog.Noticef("created new role %s", role.Role)
+ }
+ }
+ return err
+}
+
+func (s *store) DeleteRole(name string) error {
+ if name == RootRoleName {
+ return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", name)
+ }
+ err := s.deleteResource("/roles/" + name)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return authErr(http.StatusNotFound, "Role %s doesn't exist.", name)
+ }
+ }
+ }
+ if err == nil {
+ if s.lg != nil {
+ s.lg.Info("delete a new role", zap.String("role-name", name))
+ } else {
+ plog.Noticef("deleted role %s", name)
+ }
+ }
+ return err
+}
+
+func (s *store) UpdateRole(role Role) (Role, error) {
+ if role.Role == RootRoleName {
+ return Role{}, authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
+ }
+ old, err := s.getRole(role.Role, true)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return role, authErr(http.StatusNotFound, "Role %s doesn't exist.", role.Role)
+ }
+ }
+ return old, err
+ }
+ newRole, err := old.merge(s.lg, role)
+ if err != nil {
+ return old, err
+ }
+ if reflect.DeepEqual(old, newRole) {
+ return old, authErr(http.StatusBadRequest, "Role not updated. Use grant/revoke to update the role.")
+ }
+ _, err = s.updateResource("/roles/"+role.Role, newRole)
+ if err == nil {
+ if s.lg != nil {
+ s.lg.Info("updated a new role", zap.String("role-name", role.Role))
+ } else {
+ plog.Noticef("updated role %s", role.Role)
+ }
+ }
+ return newRole, err
+}
+
+func (s *store) AuthEnabled() bool {
+ return s.detectAuth()
+}
+
+func (s *store) EnableAuth() error {
+ if s.AuthEnabled() {
+ return authErr(http.StatusConflict, "already enabled")
+ }
+
+ if _, err := s.getUser("root", true); err != nil {
+ return authErr(http.StatusConflict, "No root user available, please create one")
+ }
+ if _, err := s.getRole(GuestRoleName, true); err != nil {
+ if s.lg != nil {
+ s.lg.Info(
+ "no guest role access found; creating default",
+ zap.String("role-name", GuestRoleName),
+ )
+ } else {
+ plog.Printf("no guest role access found, creating default")
+ }
+ if err := s.CreateRole(guestRole); err != nil {
+ if s.lg != nil {
+ s.lg.Warn(
+ "failed to create a guest role; aborting auth enable",
+ zap.String("role-name", GuestRoleName),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("error creating guest role. aborting auth enable.")
+ }
+ return err
+ }
+ }
+
+ if err := s.enableAuth(); err != nil {
+ if s.lg != nil {
+ s.lg.Warn("failed to enable auth", zap.Error(err))
+ } else {
+ plog.Errorf("error enabling auth (%v)", err)
+ }
+ return err
+ }
+
+ if s.lg != nil {
+ s.lg.Info("enabled auth")
+ } else {
+ plog.Noticef("auth: enabled auth")
+ }
+ return nil
+}
+
+func (s *store) DisableAuth() error {
+ if !s.AuthEnabled() {
+ return authErr(http.StatusConflict, "already disabled")
+ }
+
+ err := s.disableAuth()
+ if err == nil {
+ if s.lg != nil {
+ s.lg.Info("disabled auth")
+ } else {
+ plog.Noticef("auth: disabled auth")
+ }
+ } else {
+ if s.lg != nil {
+ s.lg.Warn("failed to disable auth", zap.Error(err))
+ } else {
+ plog.Errorf("error disabling auth (%v)", err)
+ }
+ }
+ return err
+}
+
+// merge applies the properties of the passed-in User to the User on which it
+// is called and returns a new User with these modifications applied. Think of
+// all Users as immutable sets of data. Merge allows you to perform the set
+// operations (desired grants and revokes) atomically.
+func (ou User) merge(lg *zap.Logger, nu User, s PasswordStore) (User, error) {
+ var out User
+ if ou.User != nu.User {
+ return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", ou.User, nu.User)
+ }
+ out.User = ou.User
+ if nu.Password != "" {
+ hash, err := s.HashPassword(nu.Password)
+ if err != nil {
+ return ou, err
+ }
+ out.Password = hash
+ } else {
+ out.Password = ou.Password
+ }
+ currentRoles := types.NewUnsafeSet(ou.Roles...)
+ for _, g := range nu.Grant {
+ if currentRoles.Contains(g) {
+ if lg != nil {
+ lg.Warn(
+ "attempted to grant a duplicate role for a user",
+ zap.String("user-name", nu.User),
+ zap.String("role-name", g),
+ )
+ } else {
+ plog.Noticef("granting duplicate role %s for user %s", g, nu.User)
+ }
+ return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, nu.User))
+ }
+ currentRoles.Add(g)
+ }
+ for _, r := range nu.Revoke {
+ if !currentRoles.Contains(r) {
+ if lg != nil {
+ lg.Warn(
+ "attempted to revoke a ungranted role for a user",
+ zap.String("user-name", nu.User),
+ zap.String("role-name", r),
+ )
+ } else {
+ plog.Noticef("revoking ungranted role %s for user %s", r, nu.User)
+ }
+ return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, nu.User))
+ }
+ currentRoles.Remove(r)
+ }
+ out.Roles = currentRoles.Values()
+ sort.Strings(out.Roles)
+ return out, nil
+}
+
+// merge for a role works the same as User above -- atomic Role application to
+// each of the substructures.
+func (r Role) merge(lg *zap.Logger, n Role) (Role, error) {
+ var out Role
+ var err error
+ if r.Role != n.Role {
+ return out, authErr(http.StatusConflict, "Merging role with conflicting names: %s %s", r.Role, n.Role)
+ }
+ out.Role = r.Role
+ out.Permissions, err = r.Permissions.Grant(n.Grant)
+ if err != nil {
+ return out, err
+ }
+ out.Permissions, err = out.Permissions.Revoke(lg, n.Revoke)
+ return out, err
+}
+
+func (r Role) HasKeyAccess(key string, write bool) bool {
+ if r.Role == RootRoleName {
+ return true
+ }
+ return r.Permissions.KV.HasAccess(key, write)
+}
+
+func (r Role) HasRecursiveAccess(key string, write bool) bool {
+ if r.Role == RootRoleName {
+ return true
+ }
+ return r.Permissions.KV.HasRecursiveAccess(key, write)
+}
+
+// Grant adds a set of permissions to the permission object on which it is called,
+// returning a new permission object.
+func (p Permissions) Grant(n *Permissions) (Permissions, error) {
+ var out Permissions
+ var err error
+ if n == nil {
+ return p, nil
+ }
+ out.KV, err = p.KV.Grant(n.KV)
+ return out, err
+}
+
+// Revoke removes a set of permissions from the permission object on which it is called,
+// returning a new permission object.
+func (p Permissions) Revoke(lg *zap.Logger, n *Permissions) (Permissions, error) {
+ var out Permissions
+ var err error
+ if n == nil {
+ return p, nil
+ }
+ out.KV, err = p.KV.Revoke(lg, n.KV)
+ return out, err
+}
+
+// Grant adds a set of permissions to the permission object on which it is called,
+// returning a new permission object.
+func (rw RWPermission) Grant(n RWPermission) (RWPermission, error) {
+ var out RWPermission
+ currentRead := types.NewUnsafeSet(rw.Read...)
+ for _, r := range n.Read {
+ if currentRead.Contains(r) {
+ return out, authErr(http.StatusConflict, "Granting duplicate read permission %s", r)
+ }
+ currentRead.Add(r)
+ }
+ currentWrite := types.NewUnsafeSet(rw.Write...)
+ for _, w := range n.Write {
+ if currentWrite.Contains(w) {
+ return out, authErr(http.StatusConflict, "Granting duplicate write permission %s", w)
+ }
+ currentWrite.Add(w)
+ }
+ out.Read = currentRead.Values()
+ out.Write = currentWrite.Values()
+ sort.Strings(out.Read)
+ sort.Strings(out.Write)
+ return out, nil
+}
+
+// Revoke removes a set of permissions from the permission object on which it is called,
+// returning a new permission object.
+func (rw RWPermission) Revoke(lg *zap.Logger, n RWPermission) (RWPermission, error) {
+ var out RWPermission
+ currentRead := types.NewUnsafeSet(rw.Read...)
+ for _, r := range n.Read {
+ if !currentRead.Contains(r) {
+ if lg != nil {
+ lg.Info(
+ "revoking ungranted read permission",
+ zap.String("read-permission", r),
+ )
+ } else {
+ plog.Noticef("revoking ungranted read permission %s", r)
+ }
+ continue
+ }
+ currentRead.Remove(r)
+ }
+ currentWrite := types.NewUnsafeSet(rw.Write...)
+ for _, w := range n.Write {
+ if !currentWrite.Contains(w) {
+ if lg != nil {
+ lg.Info(
+ "revoking ungranted write permission",
+ zap.String("write-permission", w),
+ )
+ } else {
+ plog.Noticef("revoking ungranted write permission %s", w)
+ }
+ continue
+ }
+ currentWrite.Remove(w)
+ }
+ out.Read = currentRead.Values()
+ out.Write = currentWrite.Values()
+ sort.Strings(out.Read)
+ sort.Strings(out.Write)
+ return out, nil
+}
+
+func (rw RWPermission) HasAccess(key string, write bool) bool {
+ var list []string
+ if write {
+ list = rw.Write
+ } else {
+ list = rw.Read
+ }
+ for _, pat := range list {
+ match, err := simpleMatch(pat, key)
+ if err == nil && match {
+ return true
+ }
+ }
+ return false
+}
+
+func (rw RWPermission) HasRecursiveAccess(key string, write bool) bool {
+ list := rw.Read
+ if write {
+ list = rw.Write
+ }
+ for _, pat := range list {
+ match, err := prefixMatch(pat, key)
+ if err == nil && match {
+ return true
+ }
+ }
+ return false
+}
+
+func simpleMatch(pattern string, key string) (match bool, err error) {
+ if pattern[len(pattern)-1] == '*' {
+ return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil
+ }
+ return key == pattern, nil
+}
+
+func prefixMatch(pattern string, key string) (match bool, err error) {
+ if pattern[len(pattern)-1] != '*' {
+ return false, nil
+ }
+ return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil
+}
+
+func attachRootRole(u User) User {
+ inRoles := false
+ for _, r := range u.Roles {
+ if r == RootRoleName {
+ inRoles = true
+ break
+ }
+ }
+ if !inRoles {
+ u.Roles = append(u.Roles, RootRoleName)
+ }
+ return u
+}
+
+func (s *store) getUser(name string, quorum bool) (User, error) {
+ resp, err := s.requestResource("/users/"+name, quorum)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
+ }
+ }
+ return User{}, err
+ }
+ var u User
+ err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u)
+ if err != nil {
+ return u, err
+ }
+ // Attach root role to root user.
+ if u.User == "root" {
+ u = attachRootRole(u)
+ }
+ return u, nil
+}
+
+func (s *store) getRole(name string, quorum bool) (Role, error) {
+ if name == RootRoleName {
+ return rootRole, nil
+ }
+ resp, err := s.requestResource("/roles/"+name, quorum)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
+ }
+ }
+ return Role{}, err
+ }
+ var r Role
+ err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
+ return r, err
+}
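
Aside (not part of the vendored file): the ACL model above reduces to exact keys plus trailing-* prefixes, with RootRoleName short-circuiting every check. A quick probe through the exported API; the role contents are example data:

package main

import (
	"fmt"

	"go.etcd.io/etcd/etcdserver/api/v2auth"
)

func main() {
	r := v2auth.Role{
		Role: "app",
		Permissions: v2auth.Permissions{
			KV: v2auth.RWPermission{
				Read:  []string{"/app/*"},
				Write: []string{"/app/config"},
			},
		},
	}
	fmt.Println(r.HasKeyAccess("/app/foo", false))   // true: "/app/*" glob-matches reads
	fmt.Println(r.HasKeyAccess("/app/foo", true))    // false: write list has no matching entry
	fmt.Println(r.HasKeyAccess("/app/config", true)) // true: exact write match
}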
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2auth/auth_requests.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2auth/auth_requests.go
new file mode 100644
index 000000000000..d6574ecca631
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2auth/auth_requests.go
@@ -0,0 +1,189 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2auth
+
+import (
+ "context"
+ "encoding/json"
+ "path"
+
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api/v2error"
+ "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+ "go.uber.org/zap"
+)
+
+func (s *store) ensureAuthDirectories() error {
+ if s.ensuredOnce {
+ return nil
+ }
+ for _, res := range []string{StorePermsPrefix, StorePermsPrefix + "/users/", StorePermsPrefix + "/roles/"} {
+ ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+ pe := false
+ rr := etcdserverpb.Request{
+ Method: "PUT",
+ Path: res,
+ Dir: true,
+ PrevExist: &pe,
+ }
+ _, err := s.server.Do(ctx, rr)
+ cancel()
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeNodeExist {
+ continue
+ }
+ }
+ if s.lg != nil {
+ s.lg.Warn(
+ "failed to create auth directories",
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("failed to create auth directories in the store (%v)", err)
+ }
+ return err
+ }
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+ defer cancel()
+ pe := false
+ rr := etcdserverpb.Request{
+ Method: "PUT",
+ Path: StorePermsPrefix + "/enabled",
+ Val: "false",
+ PrevExist: &pe,
+ }
+ _, err := s.server.Do(ctx, rr)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeNodeExist {
+ s.ensuredOnce = true
+ return nil
+ }
+ }
+ return err
+ }
+ s.ensuredOnce = true
+ return nil
+}
+
+func (s *store) enableAuth() error {
+ _, err := s.updateResource("/enabled", true)
+ return err
+}
+func (s *store) disableAuth() error {
+ _, err := s.updateResource("/enabled", false)
+ return err
+}
+
+func (s *store) detectAuth() bool {
+ if s.server == nil {
+ return false
+ }
+ value, err := s.requestResource("/enabled", false)
+ if err != nil {
+ if e, ok := err.(*v2error.Error); ok {
+ if e.ErrorCode == v2error.EcodeKeyNotFound {
+ return false
+ }
+ }
+ if s.lg != nil {
+ s.lg.Warn(
+ "failed to detect auth settings",
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("failed to detect auth settings (%s)", err)
+ }
+ return false
+ }
+
+ var u bool
+ err = json.Unmarshal([]byte(*value.Event.Node.Value), &u)
+ if err != nil {
+ if s.lg != nil {
+ s.lg.Warn(
+ "internal bookkeeping value for enabled isn't valid JSON",
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("internal bookkeeping value for enabled isn't valid JSON (%v)", err)
+ }
+ return false
+ }
+ return u
+}
+
+func (s *store) requestResource(res string, quorum bool) (etcdserver.Response, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+ defer cancel()
+ p := path.Join(StorePermsPrefix, res)
+ method := "GET"
+ if quorum {
+ method = "QGET"
+ }
+ rr := etcdserverpb.Request{
+ Method: method,
+ Path: p,
+ Dir: false, // TODO: always false?
+ }
+ return s.server.Do(ctx, rr)
+}
+
+func (s *store) updateResource(res string, value interface{}) (etcdserver.Response, error) {
+ return s.setResource(res, value, true)
+}
+func (s *store) createResource(res string, value interface{}) (etcdserver.Response, error) {
+ return s.setResource(res, value, false)
+}
+func (s *store) setResource(res string, value interface{}, prevexist bool) (etcdserver.Response, error) {
+ err := s.ensureAuthDirectories()
+ if err != nil {
+ return etcdserver.Response{}, err
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+ defer cancel()
+ data, err := json.Marshal(value)
+ if err != nil {
+ return etcdserver.Response{}, err
+ }
+ p := path.Join(StorePermsPrefix, res)
+ rr := etcdserverpb.Request{
+ Method: "PUT",
+ Path: p,
+ Val: string(data),
+ PrevExist: &prevexist,
+ }
+ return s.server.Do(ctx, rr)
+}
+
+func (s *store) deleteResource(res string) error {
+ err := s.ensureAuthDirectories()
+ if err != nil {
+ return err
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+ defer cancel()
+ pex := true
+ p := path.Join(StorePermsPrefix, res)
+ _, err = s.server.Do(ctx, etcdserverpb.Request{
+ Method: "DELETE",
+ Path: p,
+ PrevExist: &pex,
+ })
+ return err
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2discovery/discovery.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2discovery/discovery.go
new file mode 100644
index 000000000000..cf770b378594
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2discovery/discovery.go
@@ -0,0 +1,440 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2discovery provides an implementation of the cluster discovery that
+// is used by etcd with v2 client.
+package v2discovery
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "net/http"
+ "net/url"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/client"
+ "go.etcd.io/etcd/pkg/transport"
+ "go.etcd.io/etcd/pkg/types"
+
+ "github.com/coreos/pkg/capnslog"
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap"
+)
+
+var (
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "discovery")
+
+ ErrInvalidURL = errors.New("discovery: invalid URL")
+ ErrBadSizeKey = errors.New("discovery: size key is bad")
+ ErrSizeNotFound = errors.New("discovery: size key not found")
+ ErrTokenNotFound = errors.New("discovery: token not found")
+ ErrDuplicateID = errors.New("discovery: found duplicate id")
+ ErrDuplicateName = errors.New("discovery: found duplicate name")
+ ErrFullCluster = errors.New("discovery: cluster is full")
+ ErrTooManyRetries = errors.New("discovery: too many retries")
+ ErrBadDiscoveryEndpoint = errors.New("discovery: bad discovery endpoint")
+)
+
+var (
+ // Number of retries discovery will attempt before giving up and erroring out.
+ nRetries = uint(math.MaxUint32)
+ maxExpoentialRetries = uint(8)
+)
+
+// JoinCluster will connect to the discovery service at the given url, and
+// register the server represented by the given id and config to the cluster
+func JoinCluster(lg *zap.Logger, durl, dproxyurl string, id types.ID, config string) (string, error) {
+ d, err := newDiscovery(lg, durl, dproxyurl, id)
+ if err != nil {
+ return "", err
+ }
+ return d.joinCluster(config)
+}
+
+// GetCluster will connect to the discovery service at the given url and
+// retrieve a string describing the cluster
+func GetCluster(lg *zap.Logger, durl, dproxyurl string) (string, error) {
+ d, err := newDiscovery(lg, durl, dproxyurl, 0)
+ if err != nil {
+ return "", err
+ }
+ return d.getCluster()
+}
+
+type discovery struct {
+ lg *zap.Logger
+ cluster string
+ id types.ID
+ c client.KeysAPI
+ retries uint
+ url *url.URL
+
+ clock clockwork.Clock
+}
+
+// newProxyFunc builds a proxy function from the given string, which should
+// represent a URL that can be used as a proxy. It performs basic
+// sanitization of the URL and returns any error encountered.
+func newProxyFunc(lg *zap.Logger, proxy string) (func(*http.Request) (*url.URL, error), error) {
+ if proxy == "" {
+ return nil, nil
+ }
+ // Do a small amount of URL sanitization to help the user
+ // Derived from net/http.ProxyFromEnvironment
+ proxyURL, err := url.Parse(proxy)
+ if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") {
+ // proxy was bogus. Try prepending "http://" to it and
+ // see if that parses correctly. If not, we ignore the
+ // error and complain about the original one
+ var err2 error
+ proxyURL, err2 = url.Parse("http://" + proxy)
+ if err2 == nil {
+ err = nil
+ }
+ }
+ if err != nil {
+ return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
+ }
+
+ if lg != nil {
+ lg.Info("running proxy with discovery", zap.String("proxy-url", proxyURL.String()))
+ } else {
+ plog.Infof("using proxy %q", proxyURL.String())
+ }
+ return http.ProxyURL(proxyURL), nil
+}
+
+func newDiscovery(lg *zap.Logger, durl, dproxyurl string, id types.ID) (*discovery, error) {
+ u, err := url.Parse(durl)
+ if err != nil {
+ return nil, err
+ }
+ token := u.Path
+ u.Path = ""
+ pf, err := newProxyFunc(lg, dproxyurl)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early
+ tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second)
+ if err != nil {
+ return nil, err
+ }
+ tr.Proxy = pf
+ cfg := client.Config{
+ Transport: tr,
+ Endpoints: []string{u.String()},
+ }
+ c, err := client.New(cfg)
+ if err != nil {
+ return nil, err
+ }
+ dc := client.NewKeysAPIWithPrefix(c, "")
+ return &discovery{
+ lg: lg,
+ cluster: token,
+ c: dc,
+ id: id,
+ url: u,
+ clock: clockwork.NewRealClock(),
+ }, nil
+}
+
+func (d *discovery) joinCluster(config string) (string, error) {
+ // fast path: if the cluster is full, return the error
+ // do not need to register to the cluster in this case.
+ if _, _, _, err := d.checkCluster(); err != nil {
+ return "", err
+ }
+
+ if err := d.createSelf(config); err != nil {
+ // Fails, even on a timeout, if createSelf times out.
+ // TODO(barakmich): Retrying the same node might want to succeed here
+ // (ie, createSelf should be idempotent for discovery).
+ return "", err
+ }
+
+ nodes, size, index, err := d.checkCluster()
+ if err != nil {
+ return "", err
+ }
+
+ all, err := d.waitNodes(nodes, size, index)
+ if err != nil {
+ return "", err
+ }
+
+ return nodesToCluster(all, size)
+}
+
+func (d *discovery) getCluster() (string, error) {
+ nodes, size, index, err := d.checkCluster()
+ if err != nil {
+ if err == ErrFullCluster {
+ return nodesToCluster(nodes, size)
+ }
+ return "", err
+ }
+
+ all, err := d.waitNodes(nodes, size, index)
+ if err != nil {
+ return "", err
+ }
+ return nodesToCluster(all, size)
+}
+
+func (d *discovery) createSelf(contents string) error {
+ ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+ resp, err := d.c.Create(ctx, d.selfKey(), contents)
+ cancel()
+ if err != nil {
+ if eerr, ok := err.(client.Error); ok && eerr.Code == client.ErrorCodeNodeExist {
+ return ErrDuplicateID
+ }
+ return err
+ }
+
+ // ensure self appears on the server we connected to
+ w := d.c.Watcher(d.selfKey(), &client.WatcherOptions{AfterIndex: resp.Node.CreatedIndex - 1})
+ _, err = w.Next(context.Background())
+ return err
+}
+
+func (d *discovery) checkCluster() ([]*client.Node, int, uint64, error) {
+ configKey := path.Join("/", d.cluster, "_config")
+ ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+ // find cluster size
+ resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil)
+ cancel()
+ if err != nil {
+ if eerr, ok := err.(*client.Error); ok && eerr.Code == client.ErrorCodeKeyNotFound {
+ return nil, 0, 0, ErrSizeNotFound
+ }
+ if err == client.ErrInvalidJSON {
+ return nil, 0, 0, ErrBadDiscoveryEndpoint
+ }
+ if ce, ok := err.(*client.ClusterError); ok {
+ if d.lg != nil {
+ d.lg.Warn(
+ "failed to get from discovery server",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("path", path.Join(configKey, "size")),
+ zap.Error(err),
+ zap.String("err-detail", ce.Detail()),
+ )
+ } else {
+ plog.Error(ce.Detail())
+ }
+ return d.checkClusterRetry()
+ }
+ return nil, 0, 0, err
+ }
+ size, err := strconv.Atoi(resp.Node.Value)
+ if err != nil {
+ return nil, 0, 0, ErrBadSizeKey
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+ resp, err = d.c.Get(ctx, d.cluster, nil)
+ cancel()
+ if err != nil {
+ if ce, ok := err.(*client.ClusterError); ok {
+ if d.lg != nil {
+ d.lg.Warn(
+ "failed to get from discovery server",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("path", d.cluster),
+ zap.Error(err),
+ zap.String("err-detail", ce.Detail()),
+ )
+ } else {
+ plog.Error(ce.Detail())
+ }
+ return d.checkClusterRetry()
+ }
+ return nil, 0, 0, err
+ }
+ var nodes []*client.Node
+ // append non-config keys to nodes
+ for _, n := range resp.Node.Nodes {
+ if path.Base(n.Key) != path.Base(configKey) {
+ nodes = append(nodes, n)
+ }
+ }
+
+ snodes := sortableNodes{nodes}
+ sort.Sort(snodes)
+
+ // find self position
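+	// nodes are sorted by creation index, so if self is not among the first
+	// `size` registrants the cluster is already full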
+ for i := range nodes {
+ if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
+ break
+ }
+ if i >= size-1 {
+ return nodes[:size], size, resp.Index, ErrFullCluster
+ }
+ }
+ return nodes, size, resp.Index, nil
+}
+
+func (d *discovery) logAndBackoffForRetry(step string) {
+ d.retries++
+	// logAndBackoffForRetry caps the exponential backoff once d.retries exceeds maxExpoentialRetries, using a constant backoff afterward.
+ retries := d.retries
+ if retries > maxExpoentialRetries {
+ retries = maxExpoentialRetries
+ }
+	retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
+	if d.lg != nil {
+		d.lg.Info(
+			"retry connecting to discovery service",
+			zap.String("url", d.url.String()),
+			zap.String("reason", step),
+			zap.Duration("backoff", retryTimeInSecond),
+		)
+	} else {
+		plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTimeInSecond)
+	}
+	d.clock.Sleep(retryTimeInSecond)
+}
+
+func (d *discovery) checkClusterRetry() ([]*client.Node, int, uint64, error) {
+	if d.retries < nRetries {
+		d.logAndBackoffForRetry("cluster status check")
+		return d.checkCluster()
+	}
+	return nil, 0, 0, ErrTooManyRetries
+}
+
+func (d *discovery) waitNodesRetry() ([]*client.Node, error) {
+	if d.retries < nRetries {
+		d.logAndBackoffForRetry("waiting for other nodes")
+		nodes, n, index, err := d.checkCluster()
+		if err != nil {
+			return nil, err
+		}
+		return d.waitNodes(nodes, n, index)
+	}
+	return nil, ErrTooManyRetries
+}
+
+func (d *discovery) waitNodes(nodes []*client.Node, size int, index uint64) ([]*client.Node, error) {
+	if len(nodes) > size {
+		nodes = nodes[:size]
+	}
+ nodes = nodes[:size]
+ }
+ // watch from the next index
+ w := d.c.Watcher(d.cluster, &client.WatcherOptions{AfterIndex: index, Recursive: true})
+ all := make([]*client.Node, len(nodes))
+ copy(all, nodes)
+ for _, n := range all {
+ if path.Base(n.Key) == path.Base(d.selfKey()) {
+ if d.lg != nil {
+ d.lg.Info(
+ "found self from discovery server",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("self", path.Base(d.selfKey())),
+ )
+ } else {
+ plog.Noticef("found self %s in the cluster", path.Base(d.selfKey()))
+ }
+ } else {
+ if d.lg != nil {
+ d.lg.Info(
+ "found peer from discovery server",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("peer", path.Base(n.Key)),
+ )
+ } else {
+ plog.Noticef("found peer %s in the cluster", path.Base(n.Key))
+ }
+ }
+ }
+
+ // wait for others
+ for len(all) < size {
+ if d.lg != nil {
+ d.lg.Info(
+ "found peers from discovery server; waiting for more",
+ zap.String("discovery-url", d.url.String()),
+ zap.Int("found-peers", len(all)),
+ zap.Int("needed-peers", size-len(all)),
+ )
+ } else {
+ plog.Noticef("found %d peer(s), waiting for %d more", len(all), size-len(all))
+ }
+ resp, err := w.Next(context.Background())
+ if err != nil {
+ if ce, ok := err.(*client.ClusterError); ok {
+ plog.Error(ce.Detail())
+ return d.waitNodesRetry()
+ }
+ return nil, err
+ }
+ if d.lg != nil {
+ d.lg.Info(
+ "found peer from discovery server",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("peer", path.Base(resp.Node.Key)),
+ )
+ } else {
+ plog.Noticef("found peer %s in the cluster", path.Base(resp.Node.Key))
+ }
+ all = append(all, resp.Node)
+ }
+ if d.lg != nil {
+ d.lg.Info(
+ "found all needed peers from discovery server",
+ zap.String("discovery-url", d.url.String()),
+ zap.Int("found-peers", len(all)),
+ )
+ } else {
+ plog.Noticef("found %d needed peer(s)", len(all))
+ }
+ return all, nil
+}
+
+func (d *discovery) selfKey() string {
+ return path.Join("/", d.cluster, d.id.String())
+}
+
+func nodesToCluster(ns []*client.Node, size int) (string, error) {
+ s := make([]string, len(ns))
+ for i, n := range ns {
+ s[i] = n.Value
+ }
+ us := strings.Join(s, ",")
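+	// the URLs map is keyed by member name, so a duplicate name collapses
+	// entries and shrinks the map below the expected size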
+ m, err := types.NewURLsMap(us)
+ if err != nil {
+ return us, ErrInvalidURL
+ }
+ if m.Len() != size {
+ return us, ErrDuplicateName
+ }
+ return us, nil
+}
+
+type sortableNodes struct{ Nodes []*client.Node }
+
+func (ns sortableNodes) Len() int { return len(ns.Nodes) }
+func (ns sortableNodes) Less(i, j int) bool {
+ return ns.Nodes[i].CreatedIndex < ns.Nodes[j].CreatedIndex
+}
+func (ns sortableNodes) Swap(i, j int) { ns.Nodes[i], ns.Nodes[j] = ns.Nodes[j], ns.Nodes[i] }
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2error/error.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2error/error.go
new file mode 100644
index 000000000000..1244290c4a60
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2error/error.go
@@ -0,0 +1,164 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2error describes errors in the etcd project. When any change happens,
+// Documentation/v2/errorcode.md needs to be updated correspondingly.
+// To be deprecated in favor of v3 APIs.
+package v2error
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+var errors = map[int]string{
+ // command related errors
+ EcodeKeyNotFound: "Key not found",
+ EcodeTestFailed: "Compare failed", //test and set
+ EcodeNotFile: "Not a file",
+ ecodeNoMorePeer: "Reached the max number of peers in the cluster",
+ EcodeNotDir: "Not a directory",
+ EcodeNodeExist: "Key already exists", // create
+ ecodeKeyIsPreserved: "The prefix of given key is a keyword in etcd",
+ EcodeRootROnly: "Root is read only",
+ EcodeDirNotEmpty: "Directory not empty",
+ ecodeExistingPeerAddr: "Peer address has existed",
+ EcodeUnauthorized: "The request requires user authentication",
+
+ // Post form related errors
+ ecodeValueRequired: "Value is Required in POST form",
+ EcodePrevValueRequired: "PrevValue is Required in POST form",
+ EcodeTTLNaN: "The given TTL in POST form is not a number",
+ EcodeIndexNaN: "The given index in POST form is not a number",
+ ecodeValueOrTTLRequired: "Value or TTL is required in POST form",
+ ecodeTimeoutNaN: "The given timeout in POST form is not a number",
+ ecodeNameRequired: "Name is required in POST form",
+ ecodeIndexOrValueRequired: "Index or value is required",
+ ecodeIndexValueMutex: "Index and value cannot both be specified",
+ EcodeInvalidField: "Invalid field",
+ EcodeInvalidForm: "Invalid POST form",
+ EcodeRefreshValue: "Value provided on refresh",
+ EcodeRefreshTTLRequired: "A TTL must be provided on refresh",
+
+ // raft related errors
+ EcodeRaftInternal: "Raft Internal Error",
+ EcodeLeaderElect: "During Leader Election",
+
+ // etcd related errors
+ EcodeWatcherCleared: "watcher is cleared due to etcd recovery",
+ EcodeEventIndexCleared: "The event in requested index is outdated and cleared",
+ ecodeStandbyInternal: "Standby Internal Error",
+ ecodeInvalidActiveSize: "Invalid active size",
+ ecodeInvalidRemoveDelay: "Standby remove delay",
+
+ // client related errors
+ ecodeClientInternal: "Client Internal Error",
+}
+
+var errorStatus = map[int]int{
+ EcodeKeyNotFound: http.StatusNotFound,
+ EcodeNotFile: http.StatusForbidden,
+ EcodeDirNotEmpty: http.StatusForbidden,
+ EcodeUnauthorized: http.StatusUnauthorized,
+ EcodeTestFailed: http.StatusPreconditionFailed,
+ EcodeNodeExist: http.StatusPreconditionFailed,
+ EcodeRaftInternal: http.StatusInternalServerError,
+ EcodeLeaderElect: http.StatusInternalServerError,
+}
+
+const (
+ EcodeKeyNotFound = 100
+ EcodeTestFailed = 101
+ EcodeNotFile = 102
+ ecodeNoMorePeer = 103
+ EcodeNotDir = 104
+ EcodeNodeExist = 105
+ ecodeKeyIsPreserved = 106
+ EcodeRootROnly = 107
+ EcodeDirNotEmpty = 108
+ ecodeExistingPeerAddr = 109
+ EcodeUnauthorized = 110
+
+ ecodeValueRequired = 200
+ EcodePrevValueRequired = 201
+ EcodeTTLNaN = 202
+ EcodeIndexNaN = 203
+ ecodeValueOrTTLRequired = 204
+ ecodeTimeoutNaN = 205
+ ecodeNameRequired = 206
+ ecodeIndexOrValueRequired = 207
+ ecodeIndexValueMutex = 208
+ EcodeInvalidField = 209
+ EcodeInvalidForm = 210
+ EcodeRefreshValue = 211
+ EcodeRefreshTTLRequired = 212
+
+ EcodeRaftInternal = 300
+ EcodeLeaderElect = 301
+
+ EcodeWatcherCleared = 400
+ EcodeEventIndexCleared = 401
+ ecodeStandbyInternal = 402
+ ecodeInvalidActiveSize = 403
+ ecodeInvalidRemoveDelay = 404
+
+ ecodeClientInternal = 500
+)
+
+type Error struct {
+ ErrorCode int `json:"errorCode"`
+ Message string `json:"message"`
+ Cause string `json:"cause,omitempty"`
+ Index uint64 `json:"index"`
+}
+
+func NewRequestError(errorCode int, cause string) *Error {
+ return NewError(errorCode, cause, 0)
+}
+
+func NewError(errorCode int, cause string, index uint64) *Error {
+ return &Error{
+ ErrorCode: errorCode,
+ Message: errors[errorCode],
+ Cause: cause,
+ Index: index,
+ }
+}
+
+// Error is for the error interface
+func (e Error) Error() string {
+ return e.Message + " (" + e.Cause + ")"
+}
+
+func (e Error) toJsonString() string {
+ b, _ := json.Marshal(e)
+ return string(b)
+}
+
+func (e Error) StatusCode() int {
+ status, ok := errorStatus[e.ErrorCode]
+ if !ok {
+ status = http.StatusBadRequest
+ }
+ return status
+}
+
+func (e Error) WriteTo(w http.ResponseWriter) error {
+ w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index))
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(e.StatusCode())
+ _, err := w.Write([]byte(e.toJsonString() + "\n"))
+ return err
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2http/capability.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/capability.go
new file mode 100644
index 000000000000..ed6c456d0b69
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/capability.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+ "fmt"
+ "net/http"
+
+ "go.etcd.io/etcd/etcdserver/api"
+ "go.etcd.io/etcd/etcdserver/api/v2http/httptypes"
+)
+
+func authCapabilityHandler(fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if !api.IsCapabilityEnabled(api.AuthCapability) {
+ notCapable(w, r, api.AuthCapability)
+ return
+ }
+ fn(w, r)
+ }
+}
+
+func notCapable(w http.ResponseWriter, r *http.Request, c api.Capability) {
+ herr := httptypes.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Not capable of accessing %s feature during rolling upgrades.", c))
+ if err := herr.WriteTo(w); err != nil {
+ plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2http/client.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/client.go
new file mode 100644
index 000000000000..1d1e592b25d0
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/client.go
@@ -0,0 +1,793 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api"
+ "go.etcd.io/etcd/etcdserver/api/etcdhttp"
+ "go.etcd.io/etcd/etcdserver/api/membership"
+ "go.etcd.io/etcd/etcdserver/api/v2auth"
+ "go.etcd.io/etcd/etcdserver/api/v2error"
+ "go.etcd.io/etcd/etcdserver/api/v2http/httptypes"
+ stats "go.etcd.io/etcd/etcdserver/api/v2stats"
+ "go.etcd.io/etcd/etcdserver/api/v2store"
+ "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/pkg/types"
+
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap"
+)
+
+const (
+ authPrefix = "/v2/auth"
+ keysPrefix = "/v2/keys"
+ machinesPrefix = "/v2/machines"
+ membersPrefix = "/v2/members"
+ statsPrefix = "/v2/stats"
+)
+
+// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
+func NewClientHandler(lg *zap.Logger, server etcdserver.ServerPeer, timeout time.Duration) http.Handler {
+ mux := http.NewServeMux()
+ etcdhttp.HandleBasic(mux, server)
+ handleV2(lg, mux, server, timeout)
+ return requestLogger(lg, mux)
+}
+
+func handleV2(lg *zap.Logger, mux *http.ServeMux, server etcdserver.ServerV2, timeout time.Duration) {
+ sec := v2auth.NewStore(lg, server, timeout)
+ kh := &keysHandler{
+ lg: lg,
+ sec: sec,
+ server: server,
+ cluster: server.Cluster(),
+ timeout: timeout,
+ clientCertAuthEnabled: server.ClientCertAuthEnabled(),
+ }
+
+ sh := &statsHandler{
+ lg: lg,
+ stats: server,
+ }
+
+ mh := &membersHandler{
+ lg: lg,
+ sec: sec,
+ server: server,
+ cluster: server.Cluster(),
+ timeout: timeout,
+ clock: clockwork.NewRealClock(),
+ clientCertAuthEnabled: server.ClientCertAuthEnabled(),
+ }
+
+ mah := &machinesHandler{cluster: server.Cluster()}
+
+ sech := &authHandler{
+ lg: lg,
+ sec: sec,
+ cluster: server.Cluster(),
+ clientCertAuthEnabled: server.ClientCertAuthEnabled(),
+ }
+ mux.HandleFunc("/", http.NotFound)
+ mux.Handle(keysPrefix, kh)
+ mux.Handle(keysPrefix+"/", kh)
+ mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
+ mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
+ mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
+ mux.Handle(membersPrefix, mh)
+ mux.Handle(membersPrefix+"/", mh)
+ mux.Handle(machinesPrefix, mah)
+ handleAuth(mux, sech)
+}
+
+type keysHandler struct {
+ lg *zap.Logger
+ sec v2auth.Store
+ server etcdserver.ServerV2
+ cluster api.Cluster
+ timeout time.Duration
+ clientCertAuthEnabled bool
+}
+
+func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") {
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+ ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
+ defer cancel()
+ clock := clockwork.NewRealClock()
+ startTime := clock.Now()
+ rr, noValueOnSuccess, err := parseKeyRequest(r, clock)
+ if err != nil {
+ writeKeyError(h.lg, w, err)
+ return
+ }
+ // The path must be valid at this point (we've parsed the request successfully).
+ if !hasKeyPrefixAccess(h.lg, h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive, h.clientCertAuthEnabled) {
+ writeKeyNoAuth(w)
+ return
+ }
+ if !rr.Wait {
+ reportRequestReceived(rr)
+ }
+ resp, err := h.server.Do(ctx, rr)
+ if err != nil {
+ err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix)
+ writeKeyError(h.lg, w, err)
+ reportRequestFailed(rr, err)
+ return
+ }
+ switch {
+ case resp.Event != nil:
+ if err := writeKeyEvent(w, resp, noValueOnSuccess); err != nil {
+ // Should never be reached
+ if h.lg != nil {
+ h.lg.Warn("failed to write key event", zap.Error(err))
+ } else {
+ plog.Errorf("error writing event (%v)", err)
+ }
+ }
+ reportRequestCompleted(rr, startTime)
+ case resp.Watcher != nil:
+ ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout)
+ defer cancel()
+ handleKeyWatch(ctx, h.lg, w, resp, rr.Stream)
+ default:
+ writeKeyError(h.lg, w, errors.New("received response with no Event/Watcher"))
+ }
+}
+
+type machinesHandler struct {
+ cluster api.Cluster
+}
+
+func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET", "HEAD") {
+ return
+ }
+ endpoints := h.cluster.ClientURLs()
+ w.Write([]byte(strings.Join(endpoints, ", ")))
+}
+
+type membersHandler struct {
+ lg *zap.Logger
+ sec v2auth.Store
+ server etcdserver.ServerV2
+ cluster api.Cluster
+ timeout time.Duration
+ clock clockwork.Clock
+ clientCertAuthEnabled bool
+}
+
+func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET", "POST", "DELETE", "PUT") {
+ return
+ }
+ if !hasWriteRootAccess(h.lg, h.sec, r, h.clientCertAuthEnabled) {
+ writeNoAuth(h.lg, w, r)
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+ ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
+ defer cancel()
+
+ switch r.Method {
+ case "GET":
+ switch trimPrefix(r.URL.Path, membersPrefix) {
+ case "":
+ mc := newMemberCollection(h.cluster.Members())
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(mc); err != nil {
+ if h.lg != nil {
+ h.lg.Warn("failed to encode members response", zap.Error(err))
+ } else {
+ plog.Warningf("failed to encode members response (%v)", err)
+ }
+ }
+ case "leader":
+ id := h.server.Leader()
+ if id == 0 {
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusServiceUnavailable, "During election"))
+ return
+ }
+ m := newMember(h.cluster.Member(id))
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(m); err != nil {
+ if h.lg != nil {
+ h.lg.Warn("failed to encode members response", zap.Error(err))
+ } else {
+ plog.Warningf("failed to encode members response (%v)", err)
+ }
+ }
+ default:
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, "Not found"))
+ }
+
+ case "POST":
+ req := httptypes.MemberCreateRequest{}
+ if ok := unmarshalRequest(h.lg, r, &req, w); !ok {
+ return
+ }
+ now := h.clock.Now()
+ m := membership.NewMember("", req.PeerURLs, "", &now)
+ _, err := h.server.AddMember(ctx, *m)
+ switch {
+ case err == membership.ErrIDExists || err == membership.ErrPeerURLexists:
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
+ return
+ case err != nil:
+ if h.lg != nil {
+ h.lg.Warn(
+ "failed to add a member",
+ zap.String("member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("error adding member %s (%v)", m.ID, err)
+ }
+ writeError(h.lg, w, r, err)
+ return
+ }
+ res := newMember(m)
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusCreated)
+ if err := json.NewEncoder(w).Encode(res); err != nil {
+ if h.lg != nil {
+ h.lg.Warn("failed to encode members response", zap.Error(err))
+ } else {
+ plog.Warningf("failed to encode members response (%v)", err)
+ }
+ }
+
+ case "DELETE":
+ id, ok := getID(h.lg, r.URL.Path, w)
+ if !ok {
+ return
+ }
+ _, err := h.server.RemoveMember(ctx, uint64(id))
+ switch {
+ case err == membership.ErrIDRemoved:
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id)))
+ case err == membership.ErrIDNotFound:
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
+ case err != nil:
+ if h.lg != nil {
+ h.lg.Warn(
+ "failed to remove a member",
+ zap.String("member-id", id.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("error removing member %s (%v)", id, err)
+ }
+ writeError(h.lg, w, r, err)
+ default:
+ w.WriteHeader(http.StatusNoContent)
+ }
+
+ case "PUT":
+ id, ok := getID(h.lg, r.URL.Path, w)
+ if !ok {
+ return
+ }
+ req := httptypes.MemberUpdateRequest{}
+ if ok := unmarshalRequest(h.lg, r, &req, w); !ok {
+ return
+ }
+ m := membership.Member{
+ ID: id,
+ RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()},
+ }
+ _, err := h.server.UpdateMember(ctx, m)
+ switch {
+ case err == membership.ErrPeerURLexists:
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
+ case err == membership.ErrIDNotFound:
+ writeError(h.lg, w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
+ case err != nil:
+ if h.lg != nil {
+ h.lg.Warn(
+ "failed to update a member",
+ zap.String("member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("error updating member %s (%v)", m.ID, err)
+ }
+ writeError(h.lg, w, r, err)
+ default:
+ w.WriteHeader(http.StatusNoContent)
+ }
+ }
+}
+
+type statsHandler struct {
+ lg *zap.Logger
+ stats stats.Stats
+}
+
+func (h *statsHandler) serveStore(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET") {
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(h.stats.StoreStats())
+}
+
+func (h *statsHandler) serveSelf(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET") {
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(h.stats.SelfStats())
+}
+
+func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET") {
+ return
+ }
+ stats := h.stats.LeaderStats()
+ if stats == nil {
+ etcdhttp.WriteError(h.lg, w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader"))
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(stats)
+}
+
+// parseKeyRequest converts a received http.Request on keysPrefix to
+// a server Request, performing validation of supplied fields as appropriate.
+// If any validation fails, an empty Request and non-nil error is returned.
+func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, bool, error) {
+ var noValueOnSuccess bool
+ emptyReq := etcdserverpb.Request{}
+
+ err := r.ParseForm()
+ if err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidForm,
+ err.Error(),
+ )
+ }
+
+ if !strings.HasPrefix(r.URL.Path, keysPrefix) {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidForm,
+ "incorrect key prefix",
+ )
+ }
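+	// rebase the client-facing key under the internal store prefix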
+ p := path.Join(etcdserver.StoreKeysPrefix, r.URL.Path[len(keysPrefix):])
+
+ var pIdx, wIdx uint64
+ if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeIndexNaN,
+ `invalid value for "prevIndex"`,
+ )
+ }
+ if wIdx, err = getUint64(r.Form, "waitIndex"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeIndexNaN,
+ `invalid value for "waitIndex"`,
+ )
+ }
+
+ var rec, sort, wait, dir, quorum, stream bool
+ if rec, err = getBool(r.Form, "recursive"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "recursive"`,
+ )
+ }
+ if sort, err = getBool(r.Form, "sorted"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "sorted"`,
+ )
+ }
+ if wait, err = getBool(r.Form, "wait"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "wait"`,
+ )
+ }
+ // TODO(jonboulle): define what parameters dir is/isn't compatible with?
+ if dir, err = getBool(r.Form, "dir"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "dir"`,
+ )
+ }
+ if quorum, err = getBool(r.Form, "quorum"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "quorum"`,
+ )
+ }
+ if stream, err = getBool(r.Form, "stream"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "stream"`,
+ )
+ }
+
+ if wait && r.Method != "GET" {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `"wait" can only be used with GET requests`,
+ )
+ }
+
+ pV := r.FormValue("prevValue")
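+	// a prevValue supplied for compare-and-swap must be non-empty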
+ if _, ok := r.Form["prevValue"]; ok && pV == "" {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodePrevValueRequired,
+ `"prevValue" cannot be empty`,
+ )
+ }
+
+ if noValueOnSuccess, err = getBool(r.Form, "noValueOnSuccess"); err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ `invalid value for "noValueOnSuccess"`,
+ )
+ }
+
+ // TTL is nullable, so leave it null if not specified
+ // or an empty string
+ var ttl *uint64
+ if len(r.FormValue("ttl")) > 0 {
+ i, err := getUint64(r.Form, "ttl")
+ if err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeTTLNaN,
+ `invalid value for "ttl"`,
+ )
+ }
+ ttl = &i
+ }
+
+ // prevExist is nullable, so leave it null if not specified
+ var pe *bool
+ if _, ok := r.Form["prevExist"]; ok {
+ bv, err := getBool(r.Form, "prevExist")
+ if err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ "invalid value for prevExist",
+ )
+ }
+ pe = &bv
+ }
+
+ // refresh is nullable, so leave it null if not specified
+ var refresh *bool
+ if _, ok := r.Form["refresh"]; ok {
+ bv, err := getBool(r.Form, "refresh")
+ if err != nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeInvalidField,
+ "invalid value for refresh",
+ )
+ }
+ refresh = &bv
+ if refresh != nil && *refresh {
+ val := r.FormValue("value")
+ if _, ok := r.Form["value"]; ok && val != "" {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeRefreshValue,
+ `A value was provided on a refresh`,
+ )
+ }
+ if ttl == nil {
+ return emptyReq, false, v2error.NewRequestError(
+ v2error.EcodeRefreshTTLRequired,
+ `No TTL value set`,
+ )
+ }
+ }
+ }
+
+ rr := etcdserverpb.Request{
+ Method: r.Method,
+ Path: p,
+ Val: r.FormValue("value"),
+ Dir: dir,
+ PrevValue: pV,
+ PrevIndex: pIdx,
+ PrevExist: pe,
+ Wait: wait,
+ Since: wIdx,
+ Recursive: rec,
+ Sorted: sort,
+ Quorum: quorum,
+ Stream: stream,
+ }
+
+ if pe != nil {
+ rr.PrevExist = pe
+ }
+
+ if refresh != nil {
+ rr.Refresh = refresh
+ }
+
+ // Null TTL is equivalent to unset Expiration
+ if ttl != nil {
+ expr := time.Duration(*ttl) * time.Second
+ rr.Expiration = clock.Now().Add(expr).UnixNano()
+ }
+
+ return rr, noValueOnSuccess, nil
+}
+
+// writeKeyEvent trims the prefix of key path in a single Event under
+// StoreKeysPrefix, serializes it and writes the resulting JSON to the given
+// ResponseWriter, along with the appropriate headers.
+func writeKeyEvent(w http.ResponseWriter, resp etcdserver.Response, noValueOnSuccess bool) error {
+ ev := resp.Event
+ if ev == nil {
+ return errors.New("cannot write empty Event")
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Header().Set("X-Etcd-Index", fmt.Sprint(ev.EtcdIndex))
+ w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index))
+ w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term))
+
+ if ev.IsCreated() {
+ w.WriteHeader(http.StatusCreated)
+ }
+
+ ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
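+	// noValueOnSuccess suppresses the node payloads on successful writes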
+ if noValueOnSuccess &&
+ (ev.Action == v2store.Set || ev.Action == v2store.CompareAndSwap ||
+ ev.Action == v2store.Create || ev.Action == v2store.Update) {
+ ev.Node = nil
+ ev.PrevNode = nil
+ }
+ return json.NewEncoder(w).Encode(ev)
+}
+
+func writeKeyNoAuth(w http.ResponseWriter) {
+ e := v2error.NewError(v2error.EcodeUnauthorized, "Insufficient credentials", 0)
+ e.WriteTo(w)
+}
+
+// writeKeyError logs and writes the given Error to the ResponseWriter.
+// If the error is not a *v2error.Error, it is wrapped as a raft-internal etcd error.
+func writeKeyError(lg *zap.Logger, w http.ResponseWriter, err error) {
+ if err == nil {
+ return
+ }
+ switch e := err.(type) {
+ case *v2error.Error:
+ e.WriteTo(w)
+ default:
+ switch err {
+ case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost:
+ if lg != nil {
+ lg.Warn(
+ "v2 response error",
+ zap.String("internal-server-error", err.Error()),
+ )
+ } else {
+ mlog.MergeError(err)
+ }
+ default:
+ if lg != nil {
+ lg.Warn(
+ "unexpected v2 response error",
+ zap.String("internal-server-error", err.Error()),
+ )
+ } else {
+ mlog.MergeErrorf("got unexpected response error (%v)", err)
+ }
+ }
+ ee := v2error.NewError(v2error.EcodeRaftInternal, err.Error(), 0)
+ ee.WriteTo(w)
+ }
+}
+
+func handleKeyWatch(ctx context.Context, lg *zap.Logger, w http.ResponseWriter, resp etcdserver.Response, stream bool) {
+ wa := resp.Watcher
+ defer wa.Remove()
+ ech := wa.EventChan()
+ var nch <-chan bool
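+	// detect client disconnects so long-polling watches do not leak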
+ if x, ok := w.(http.CloseNotifier); ok {
+ nch = x.CloseNotify()
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.Header().Set("X-Etcd-Index", fmt.Sprint(wa.StartIndex()))
+ w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index))
+ w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term))
+ w.WriteHeader(http.StatusOK)
+
+ // Ensure headers are flushed early, in case of long polling
+ w.(http.Flusher).Flush()
+
+ for {
+ select {
+ case <-nch:
+ // Client closed connection. Nothing to do.
+ return
+ case <-ctx.Done():
+ // Timed out. net/http will close the connection for us, so nothing to do.
+ return
+ case ev, ok := <-ech:
+ if !ok {
+				// If the channel is closed, more notifications arrived than
+				// we were able to send to the client in time, so we simply
+				// end the stream.
+ return
+ }
+ ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
+ if err := json.NewEncoder(w).Encode(ev); err != nil {
+ // Should never be reached
+ if lg != nil {
+ lg.Warn("failed to encode event", zap.Error(err))
+ } else {
+ plog.Warningf("error writing event (%v)", err)
+ }
+ return
+ }
+ if !stream {
+ return
+ }
+ w.(http.Flusher).Flush()
+ }
+ }
+}
+
+func trimEventPrefix(ev *v2store.Event, prefix string) *v2store.Event {
+ if ev == nil {
+ return nil
+ }
+	// Since the *Event may reference one in the store history,
+	// we must copy it before modifying it
+ e := ev.Clone()
+ trimNodeExternPrefix(e.Node, prefix)
+ trimNodeExternPrefix(e.PrevNode, prefix)
+ return e
+}
+
+func trimNodeExternPrefix(n *v2store.NodeExtern, prefix string) {
+ if n == nil {
+ return
+ }
+ n.Key = strings.TrimPrefix(n.Key, prefix)
+ for _, nn := range n.Nodes {
+ trimNodeExternPrefix(nn, prefix)
+ }
+}
+
+func trimErrorPrefix(err error, prefix string) error {
+ if e, ok := err.(*v2error.Error); ok {
+ e.Cause = strings.TrimPrefix(e.Cause, prefix)
+ }
+ return err
+}
+
+func unmarshalRequest(lg *zap.Logger, r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool {
+ ctype := r.Header.Get("Content-Type")
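+	// strip any media-type parameters (e.g. "; charset=utf-8") before comparing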
+ semicolonPosition := strings.Index(ctype, ";")
+ if semicolonPosition != -1 {
+ ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition]))
+ }
+ if ctype != "application/json" {
+ writeError(lg, w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype)))
+ return false
+ }
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
+ return false
+ }
+ if err := req.UnmarshalJSON(b); err != nil {
+ writeError(lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
+ return false
+ }
+ return true
+}
+
+func getID(lg *zap.Logger, p string, w http.ResponseWriter) (types.ID, bool) {
+ idStr := trimPrefix(p, membersPrefix)
+ if idStr == "" {
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return 0, false
+ }
+ id, err := types.IDFromString(idStr)
+ if err != nil {
+ writeError(lg, w, nil, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr)))
+ return 0, false
+ }
+ return id, true
+}
+
+// getUint64 extracts a uint64 by the given key from a Form. If the key does
+// not exist in the form, 0 is returned. If the key exists but the value is
+// badly formed, an error is returned. If multiple values are present only the
+// first is considered.
+func getUint64(form url.Values, key string) (i uint64, err error) {
+ if vals, ok := form[key]; ok {
+ i, err = strconv.ParseUint(vals[0], 10, 64)
+ }
+ return
+}
+
+// getBool extracts a bool by the given key from a Form. If the key does not
+// exist in the form, false is returned. If the key exists but the value is
+// badly formed, an error is returned. If multiple values are present only the
+// first is considered.
+func getBool(form url.Values, key string) (b bool, err error) {
+ if vals, ok := form[key]; ok {
+ b, err = strconv.ParseBool(vals[0])
+ }
+ return
+}
+
+// trimPrefix removes a given prefix and any slash following the prefix
+// e.g.: trimPrefix("foo", "foo") == trimPrefix("foo/", "foo") == ""
+func trimPrefix(p, prefix string) (s string) {
+ s = strings.TrimPrefix(p, prefix)
+ s = strings.TrimPrefix(s, "/")
+ return
+}
+
+func newMemberCollection(ms []*membership.Member) *httptypes.MemberCollection {
+ c := httptypes.MemberCollection(make([]httptypes.Member, len(ms)))
+
+ for i, m := range ms {
+ c[i] = newMember(m)
+ }
+
+ return &c
+}
+
+func newMember(m *membership.Member) httptypes.Member {
+ tm := httptypes.Member{
+ ID: m.ID.String(),
+ Name: m.Name,
+ PeerURLs: make([]string, len(m.PeerURLs)),
+ ClientURLs: make([]string, len(m.ClientURLs)),
+ }
+
+ copy(tm.PeerURLs, m.PeerURLs)
+ copy(tm.ClientURLs, m.ClientURLs)
+
+ return tm
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2http/client_auth.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/client_auth.go
new file mode 100644
index 000000000000..d8d6a883a934
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/client_auth.go
@@ -0,0 +1,665 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+ "encoding/json"
+ "net/http"
+ "path"
+ "strings"
+
+ "go.etcd.io/etcd/etcdserver/api"
+ "go.etcd.io/etcd/etcdserver/api/v2auth"
+ "go.etcd.io/etcd/etcdserver/api/v2http/httptypes"
+
+ "go.uber.org/zap"
+)
+
+type authHandler struct {
+ lg *zap.Logger
+ sec v2auth.Store
+ cluster api.Cluster
+ clientCertAuthEnabled bool
+}
+
+func hasWriteRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
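+	// reads are always allowed; only mutating methods require the root role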
+ if r.Method == "GET" || r.Method == "HEAD" {
+ return true
+ }
+ return hasRootAccess(lg, sec, r, clientCertAuthEnabled)
+}
+
+func userFromBasicAuth(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User {
+ username, password, ok := r.BasicAuth()
+ if !ok {
+ if lg != nil {
+ lg.Warn("malformed basic auth encoding")
+ } else {
+ plog.Warningf("auth: malformed basic auth encoding")
+ }
+ return nil
+ }
+ user, err := sec.GetUser(username)
+ if err != nil {
+ return nil
+ }
+
+ ok = sec.CheckPassword(user, password)
+ if !ok {
+ if lg != nil {
+ lg.Warn("incorrect password", zap.String("user-name", username))
+ } else {
+ plog.Warningf("auth: incorrect password for user: %s", username)
+ }
+ return nil
+ }
+ return &user
+}
+
+func userFromClientCertificate(lg *zap.Logger, sec v2auth.Store, r *http.Request) *v2auth.User {
+ if r.TLS == nil {
+ return nil
+ }
+
+ for _, chains := range r.TLS.VerifiedChains {
+ for _, chain := range chains {
+ if lg != nil {
+ lg.Debug("found common name", zap.String("common-name", chain.Subject.CommonName))
+ } else {
+ plog.Debugf("auth: found common name %s.\n", chain.Subject.CommonName)
+ }
+ user, err := sec.GetUser(chain.Subject.CommonName)
+ if err == nil {
+ if lg != nil {
+ lg.Debug(
+ "authenticated a user via common name",
+ zap.String("user-name", user.User),
+ zap.String("common-name", chain.Subject.CommonName),
+ )
+ } else {
+ plog.Debugf("auth: authenticated user %s by cert common name.", user.User)
+ }
+ return &user
+ }
+ }
+ }
+ return nil
+}
+
+func hasRootAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
+ if sec == nil {
+ // No store means no auth available, eg, tests.
+ return true
+ }
+ if !sec.AuthEnabled() {
+ return true
+ }
+
+ var rootUser *v2auth.User
+ if r.Header.Get("Authorization") == "" && clientCertAuthEnabled {
+ rootUser = userFromClientCertificate(lg, sec, r)
+ if rootUser == nil {
+ return false
+ }
+ } else {
+ rootUser = userFromBasicAuth(lg, sec, r)
+ if rootUser == nil {
+ return false
+ }
+ }
+
+ for _, role := range rootUser.Roles {
+ if role == v2auth.RootRoleName {
+ return true
+ }
+ }
+
+ if lg != nil {
+ lg.Warn(
+ "a user does not have root role for resource",
+ zap.String("root-user", rootUser.User),
+ zap.String("root-role-name", v2auth.RootRoleName),
+ zap.String("resource-path", r.URL.Path),
+ )
+ } else {
+ plog.Warningf("auth: user %s does not have the %s role for resource %s.", rootUser.User, v2auth.RootRoleName, r.URL.Path)
+ }
+ return false
+}
+
+func hasKeyPrefixAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool {
+ if sec == nil {
+ // No store means no auth available, eg, tests.
+ return true
+ }
+ if !sec.AuthEnabled() {
+ return true
+ }
+
+ var user *v2auth.User
+ if r.Header.Get("Authorization") == "" {
+ if clientCertAuthEnabled {
+ user = userFromClientCertificate(lg, sec, r)
+ }
+ if user == nil {
+ return hasGuestAccess(lg, sec, r, key)
+ }
+ } else {
+ user = userFromBasicAuth(lg, sec, r)
+ if user == nil {
+ return false
+ }
+ }
+
+ writeAccess := r.Method != "GET" && r.Method != "HEAD"
+ for _, roleName := range user.Roles {
+ role, err := sec.GetRole(roleName)
+ if err != nil {
+ continue
+ }
+ if recursive {
+ if role.HasRecursiveAccess(key, writeAccess) {
+ return true
+ }
+ } else if role.HasKeyAccess(key, writeAccess) {
+ return true
+ }
+ }
+
+ if lg != nil {
+ lg.Warn(
+ "invalid access for user on key",
+ zap.String("user-name", user.User),
+ zap.String("key", key),
+ )
+ } else {
+ plog.Warningf("auth: invalid access for user %s on key %s.", user.User, key)
+ }
+ return false
+}
+
+func hasGuestAccess(lg *zap.Logger, sec v2auth.Store, r *http.Request, key string) bool {
+ writeAccess := r.Method != "GET" && r.Method != "HEAD"
+ role, err := sec.GetRole(v2auth.GuestRoleName)
+ if err != nil {
+ return false
+ }
+ if role.HasKeyAccess(key, writeAccess) {
+ return true
+ }
+
+ if lg != nil {
+ lg.Warn(
+ "invalid access for a guest role on key",
+ zap.String("role-name", v2auth.GuestRoleName),
+ zap.String("key", key),
+ )
+ } else {
+ plog.Warningf("auth: invalid access for unauthenticated user on resource %s.", key)
+ }
+ return false
+}
+
+func writeNoAuth(lg *zap.Logger, w http.ResponseWriter, r *http.Request) {
+ herr := httptypes.NewHTTPError(http.StatusUnauthorized, "Insufficient credentials")
+ if err := herr.WriteTo(w); err != nil {
+ if lg != nil {
+ lg.Debug(
+ "failed to write v2 HTTP error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.Error(err),
+ )
+ } else {
+ plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)
+ }
+ }
+}
+
+func handleAuth(mux *http.ServeMux, sh *authHandler) {
+ mux.HandleFunc(authPrefix+"/roles", authCapabilityHandler(sh.baseRoles))
+ mux.HandleFunc(authPrefix+"/roles/", authCapabilityHandler(sh.handleRoles))
+ mux.HandleFunc(authPrefix+"/users", authCapabilityHandler(sh.baseUsers))
+ mux.HandleFunc(authPrefix+"/users/", authCapabilityHandler(sh.handleUsers))
+ mux.HandleFunc(authPrefix+"/enable", authCapabilityHandler(sh.enableDisable))
+}
+
+func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET") {
+ return
+ }
+ if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
+ writeNoAuth(sh.lg, w, r)
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+ w.Header().Set("Content-Type", "application/json")
+
+ roles, err := sh.sec.AllRoles()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ if roles == nil {
+ roles = make([]string, 0)
+ }
+
+ err = r.ParseForm()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+
+ var rolesCollections struct {
+ Roles []v2auth.Role `json:"roles"`
+ }
+ for _, roleName := range roles {
+ var role v2auth.Role
+ role, err = sh.sec.GetRole(roleName)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ rolesCollections.Roles = append(rolesCollections.Roles, role)
+ }
+ err = json.NewEncoder(w).Encode(rolesCollections)
+
+ if err != nil {
+ if sh.lg != nil {
+ sh.lg.Warn(
+ "failed to encode base roles",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("baseRoles error encoding on %s", r.URL)
+ }
+ writeError(sh.lg, w, r, err)
+ return
+ }
+}
+
+func (sh *authHandler) handleRoles(w http.ResponseWriter, r *http.Request) {
+ subpath := path.Clean(r.URL.Path[len(authPrefix):])
+ // Split "/roles/rolename/command".
+ // First item is an empty string, second is "roles"
+ pieces := strings.Split(subpath, "/")
+ if len(pieces) == 2 {
+ sh.baseRoles(w, r)
+ return
+ }
+ if len(pieces) != 3 {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
+ return
+ }
+ sh.forRole(w, r, pieces[2])
+}
+
+func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role string) {
+ if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
+ return
+ }
+ if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
+ writeNoAuth(sh.lg, w, r)
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+ w.Header().Set("Content-Type", "application/json")
+
+ switch r.Method {
+ case "GET":
+ data, err := sh.sec.GetRole(role)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ err = json.NewEncoder(w).Encode(data)
+ if err != nil {
+ if sh.lg != nil {
+ sh.lg.Warn(
+ "failed to encode a role",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("forRole error encoding on %s", r.URL)
+ }
+ return
+ }
+ return
+
+ case "PUT":
+ var in v2auth.Role
+ err := json.NewDecoder(r.Body).Decode(&in)
+ if err != nil {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
+ return
+ }
+ if in.Role != role {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL"))
+ return
+ }
+
+ var out v2auth.Role
+
+ // create
+ if in.Grant.IsEmpty() && in.Revoke.IsEmpty() {
+ err = sh.sec.CreateRole(in)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ w.WriteHeader(http.StatusCreated)
+ out = in
+ } else {
+ if !in.Permissions.IsEmpty() {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke"))
+ return
+ }
+ out, err = sh.sec.UpdateRole(in)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ w.WriteHeader(http.StatusOK)
+ }
+
+ err = json.NewEncoder(w).Encode(out)
+ if err != nil {
+ if sh.lg != nil {
+ sh.lg.Warn(
+ "failed to encode a role",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("forRole error encoding on %s", r.URL)
+ }
+ return
+ }
+ return
+
+ case "DELETE":
+ err := sh.sec.DeleteRole(role)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ }
+}
+
+type userWithRoles struct {
+ User string `json:"user"`
+ Roles []v2auth.Role `json:"roles,omitempty"`
+}
+
+type usersCollections struct {
+ Users []userWithRoles `json:"users"`
+}
+
+func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET") {
+ return
+ }
+ if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
+ writeNoAuth(sh.lg, w, r)
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+ w.Header().Set("Content-Type", "application/json")
+
+ users, err := sh.sec.AllUsers()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ if users == nil {
+ users = make([]string, 0)
+ }
+
+ err = r.ParseForm()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+
+ ucs := usersCollections{}
+ for _, userName := range users {
+ var user v2auth.User
+ user, err = sh.sec.GetUser(userName)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+
+ uwr := userWithRoles{User: user.User}
+ for _, roleName := range user.Roles {
+ var role v2auth.Role
+ role, err = sh.sec.GetRole(roleName)
+ if err != nil {
+ continue
+ }
+ uwr.Roles = append(uwr.Roles, role)
+ }
+
+ ucs.Users = append(ucs.Users, uwr)
+ }
+ err = json.NewEncoder(w).Encode(ucs)
+
+ if err != nil {
+ if sh.lg != nil {
+ sh.lg.Warn(
+ "failed to encode users",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("baseUsers error encoding on %s", r.URL)
+ }
+ writeError(sh.lg, w, r, err)
+ return
+ }
+}
+
+func (sh *authHandler) handleUsers(w http.ResponseWriter, r *http.Request) {
+ subpath := path.Clean(r.URL.Path[len(authPrefix):])
+ // Split "/users/username".
+ // First item is an empty string, second is "users"
+ pieces := strings.Split(subpath, "/")
+ if len(pieces) == 2 {
+ sh.baseUsers(w, r)
+ return
+ }
+ if len(pieces) != 3 {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
+ return
+ }
+ sh.forUser(w, r, pieces[2])
+}
+
+func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user string) {
+ if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
+ return
+ }
+ if !hasRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
+ writeNoAuth(sh.lg, w, r)
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+ w.Header().Set("Content-Type", "application/json")
+
+ switch r.Method {
+ case "GET":
+ u, err := sh.sec.GetUser(user)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+
+ err = r.ParseForm()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+
+ uwr := userWithRoles{User: u.User}
+ for _, roleName := range u.Roles {
+ var role v2auth.Role
+ role, err = sh.sec.GetRole(roleName)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ uwr.Roles = append(uwr.Roles, role)
+ }
+ err = json.NewEncoder(w).Encode(uwr)
+
+ if err != nil {
+ if sh.lg != nil {
+ sh.lg.Warn(
+ "failed to encode roles",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("forUser error encoding on %s", r.URL)
+ }
+ return
+ }
+ return
+
+ case "PUT":
+ var u v2auth.User
+ err := json.NewDecoder(r.Body).Decode(&u)
+ if err != nil {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
+ return
+ }
+ if u.User != user {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL"))
+ return
+ }
+
+ var (
+ out v2auth.User
+ created bool
+ )
+
+ if len(u.Grant) == 0 && len(u.Revoke) == 0 {
+ // create or update
+ if len(u.Roles) != 0 {
+ out, err = sh.sec.CreateUser(u)
+ } else {
+ // if user passes in both password and roles, we are unsure about his/her
+ // intention.
+ out, created, err = sh.sec.CreateOrUpdateUser(u)
+ }
+
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ } else {
+ // update case
+ if len(u.Roles) != 0 {
+ writeError(sh.lg, w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke"))
+ return
+ }
+ out, err = sh.sec.UpdateUser(u)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ }
+
+ if created {
+ w.WriteHeader(http.StatusCreated)
+ } else {
+ w.WriteHeader(http.StatusOK)
+ }
+
+ out.Password = ""
+
+ err = json.NewEncoder(w).Encode(out)
+ if err != nil {
+ if sh.lg != nil {
+ sh.lg.Warn(
+ "failed to encode a user",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("forUser error encoding on %s", r.URL)
+ }
+ return
+ }
+ return
+
+ case "DELETE":
+ err := sh.sec.DeleteUser(user)
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ }
+}
+
+type enabled struct {
+ Enabled bool `json:"enabled"`
+}
+
+func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
+ return
+ }
+ if !hasWriteRootAccess(sh.lg, sh.sec, r, sh.clientCertAuthEnabled) {
+ writeNoAuth(sh.lg, w, r)
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
+ w.Header().Set("Content-Type", "application/json")
+ isEnabled := sh.sec.AuthEnabled()
+ switch r.Method {
+ case "GET":
+ jsonDict := enabled{isEnabled}
+ err := json.NewEncoder(w).Encode(jsonDict)
+ if err != nil {
+ if sh.lg != nil {
+ sh.lg.Warn(
+					"failed to encode an auth state",
+ zap.String("url", r.URL.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("error encoding auth state on %s", r.URL)
+ }
+ }
+
+ case "PUT":
+ err := sh.sec.EnableAuth()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+
+ case "DELETE":
+ err := sh.sec.DisableAuth()
+ if err != nil {
+ writeError(sh.lg, w, r, err)
+ return
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2http/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/doc.go
new file mode 100644
index 000000000000..475c4b1f95a3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2http provides etcd client and server implementations.
+package v2http
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2http/http.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/http.go
new file mode 100644
index 000000000000..c6956893e147
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/http.go
@@ -0,0 +1,93 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+ "math"
+ "net/http"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/etcdhttp"
+ "go.etcd.io/etcd/etcdserver/api/v2auth"
+ "go.etcd.io/etcd/etcdserver/api/v2http/httptypes"
+ "go.etcd.io/etcd/pkg/logutil"
+
+ "github.com/coreos/pkg/capnslog"
+ "go.uber.org/zap"
+)
+
+const (
+ // time to wait for a Watch request
+ defaultWatchTimeout = time.Duration(math.MaxInt64)
+)
+
+var (
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver/api/v2http")
+ mlog = logutil.NewMergeLogger(plog)
+)
+
+func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) {
+ if err == nil {
+ return
+ }
+ if e, ok := err.(v2auth.Error); ok {
+ herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error())
+ if et := herr.WriteTo(w); et != nil {
+ if lg != nil {
+ lg.Debug(
+ "failed to write v2 HTTP error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("v2auth-error", e.Error()),
+ zap.Error(et),
+ )
+ } else {
+ plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
+ }
+ }
+ return
+ }
+ etcdhttp.WriteError(lg, w, r, err)
+}
+
+// allowMethod verifies that the given method is one of the allowed methods,
+// and if not, it writes an error to w. A boolean is returned indicating
+// whether or not the method is allowed.
+func allowMethod(w http.ResponseWriter, m string, ms ...string) bool {
+ for _, meth := range ms {
+ if m == meth {
+ return true
+ }
+ }
+ w.Header().Set("Allow", strings.Join(ms, ","))
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return false
+}
+
+func requestLogger(lg *zap.Logger, handler http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if lg != nil {
+ lg.Debug(
+ "handling HTTP request",
+ zap.String("method", r.Method),
+ zap.String("request-uri", r.RequestURI),
+ zap.String("remote-addr", r.RemoteAddr),
+ )
+ } else {
+ plog.Debugf("[%s] %s remote:%s", r.Method, r.RequestURI, r.RemoteAddr)
+ }
+ handler.ServeHTTP(w, r)
+ })
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2http/httptypes/errors.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/httptypes/errors.go
new file mode 100644
index 000000000000..245c0899eedf
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/httptypes/errors.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httptypes
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/coreos/pkg/capnslog"
+)
+
+var (
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver/api/v2http/httptypes")
+)
+
+type HTTPError struct {
+ Message string `json:"message"`
+ // Code is the HTTP status code
+ Code int `json:"-"`
+}
+
+func (e HTTPError) Error() string {
+ return e.Message
+}
+
+func (e HTTPError) WriteTo(w http.ResponseWriter) error {
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(e.Code)
+ b, err := json.Marshal(e)
+ if err != nil {
+ plog.Panicf("marshal HTTPError should never fail (%v)", err)
+ }
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+func NewHTTPError(code int, m string) *HTTPError {
+ return &HTTPError{
+ Message: m,
+ Code: code,
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2http/httptypes/member.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/httptypes/member.go
new file mode 100644
index 000000000000..95fd443ffdc6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/httptypes/member.go
@@ -0,0 +1,69 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package httptypes defines how etcd's HTTP API entities are serialized to and
+// deserialized from JSON.
+package httptypes
+
+import (
+ "encoding/json"
+
+ "go.etcd.io/etcd/pkg/types"
+)
+
+type Member struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+ PeerURLs []string `json:"peerURLs"`
+ ClientURLs []string `json:"clientURLs"`
+}
+
+type MemberCreateRequest struct {
+ PeerURLs types.URLs
+}
+
+type MemberUpdateRequest struct {
+ MemberCreateRequest
+}
+
+func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error {
+ s := struct {
+ PeerURLs []string `json:"peerURLs"`
+ }{}
+
+ err := json.Unmarshal(data, &s)
+ if err != nil {
+ return err
+ }
+
+ urls, err := types.NewURLs(s.PeerURLs)
+ if err != nil {
+ return err
+ }
+
+ m.PeerURLs = urls
+ return nil
+}
+
+type MemberCollection []Member
+
+func (c *MemberCollection) MarshalJSON() ([]byte, error) {
+ d := struct {
+ Members []Member `json:"members"`
+ }{
+ Members: []Member(*c),
+ }
+
+ return json.Marshal(d)
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2http/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/metrics.go
new file mode 100644
index 000000000000..14f7da0fea70
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2http/metrics.go
@@ -0,0 +1,100 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2http
+
+import (
+ "strconv"
+ "time"
+
+ "net/http"
+
+ "go.etcd.io/etcd/etcdserver/api/v2error"
+ "go.etcd.io/etcd/etcdserver/api/v2http/httptypes"
+ "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ incomingEvents = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "http",
+ Name: "received_total",
+ Help: "Counter of requests received into the system (successfully parsed and authd).",
+ }, []string{"method"})
+
+ failedEvents = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "http",
+ Name: "failed_total",
+			Help:      "Counter of request handling failures (non-watches), by method (GET/PUT etc.) and code (400, 500 etc.).",
+ }, []string{"method", "code"})
+
+ successfulEventsHandlingSec = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "http",
+ Name: "successful_duration_seconds",
+ Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).",
+
+ // lowest bucket start of upper bound 0.0005 sec (0.5 ms) with factor 2
+ // highest bucket start of 0.0005 sec * 2^12 == 2.048 sec
+ Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
+ }, []string{"method"})
+)
+
+func init() {
+ prometheus.MustRegister(incomingEvents)
+ prometheus.MustRegister(failedEvents)
+ prometheus.MustRegister(successfulEventsHandlingSec)
+}
+
+func reportRequestReceived(request etcdserverpb.Request) {
+ incomingEvents.WithLabelValues(methodFromRequest(request)).Inc()
+}
+
+func reportRequestCompleted(request etcdserverpb.Request, startTime time.Time) {
+ method := methodFromRequest(request)
+ successfulEventsHandlingSec.WithLabelValues(method).Observe(time.Since(startTime).Seconds())
+}
+
+func reportRequestFailed(request etcdserverpb.Request, err error) {
+ method := methodFromRequest(request)
+ failedEvents.WithLabelValues(method, strconv.Itoa(codeFromError(err))).Inc()
+}
+
+func methodFromRequest(request etcdserverpb.Request) string {
+ if request.Method == "GET" && request.Quorum {
+ return "QGET"
+ }
+ return request.Method
+}
+
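+// codeFromError maps an error to an HTTP status code; nil or unrecognized
+// errors map to 500.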
+func codeFromError(err error) int {
+ if err == nil {
+ return http.StatusInternalServerError
+ }
+ switch e := err.(type) {
+ case *v2error.Error:
+ return e.StatusCode()
+ case *httptypes.HTTPError:
+ return e.Code
+ default:
+ return http.StatusInternalServerError
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/leader.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/leader.go
new file mode 100644
index 000000000000..ca47f0f37a08
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/leader.go
@@ -0,0 +1,128 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2stats
+
+import (
+ "encoding/json"
+ "math"
+ "sync"
+ "time"
+)
+
+// LeaderStats is used by the leader in an etcd cluster, and encapsulates
+// statistics about communication with its followers
+type LeaderStats struct {
+ leaderStats
+ sync.Mutex
+}
+
+type leaderStats struct {
+ // Leader is the ID of the leader in the etcd cluster.
+ // TODO(jonboulle): clarify that these are IDs, not names
+ Leader string `json:"leader"`
+ Followers map[string]*FollowerStats `json:"followers"`
+}
+
+// NewLeaderStats generates a new LeaderStats with the given id as leader
+func NewLeaderStats(id string) *LeaderStats {
+ return &LeaderStats{
+ leaderStats: leaderStats{
+ Leader: id,
+ Followers: make(map[string]*FollowerStats),
+ },
+ }
+}
+
+func (ls *LeaderStats) JSON() []byte {
+ ls.Lock()
+ stats := ls.leaderStats
+ ls.Unlock()
+ b, err := json.Marshal(stats)
+ // TODO(jonboulle): appropriate error handling?
+ if err != nil {
+ plog.Errorf("error marshalling leader stats (%v)", err)
+ }
+ return b
+}
+
+func (ls *LeaderStats) Follower(name string) *FollowerStats {
+ ls.Lock()
+ defer ls.Unlock()
+ fs, ok := ls.Followers[name]
+ if !ok {
+ fs = &FollowerStats{}
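+		// seed Minimum with a huge sentinel so the first sample always lowers it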
+ fs.Latency.Minimum = 1 << 63
+ ls.Followers[name] = fs
+ }
+ return fs
+}
+
+// FollowerStats encapsulates various statistics about a follower in an etcd cluster
+type FollowerStats struct {
+ Latency LatencyStats `json:"latency"`
+ Counts CountsStats `json:"counts"`
+
+ sync.Mutex
+}
+
+// LatencyStats encapsulates latency statistics.
+type LatencyStats struct {
+ Current float64 `json:"current"`
+ Average float64 `json:"average"`
+ averageSquare float64
+ StandardDeviation float64 `json:"standardDeviation"`
+ Minimum float64 `json:"minimum"`
+ Maximum float64 `json:"maximum"`
+}
+
+// CountsStats encapsulates raft statistics.
+type CountsStats struct {
+ Fail uint64 `json:"fail"`
+ Success uint64 `json:"success"`
+}
+
+// Succ updates the FollowerStats with a successful send
+func (fs *FollowerStats) Succ(d time.Duration) {
+ fs.Lock()
+ defer fs.Unlock()
+
+ total := float64(fs.Counts.Success) * fs.Latency.Average
+ totalSquare := float64(fs.Counts.Success) * fs.Latency.averageSquare
+
+ fs.Counts.Success++
+
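+	// convert the duration from nanoseconds to milliseconds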
+ fs.Latency.Current = float64(d) / (1000000.0)
+
+ if fs.Latency.Current > fs.Latency.Maximum {
+ fs.Latency.Maximum = fs.Latency.Current
+ }
+
+ if fs.Latency.Current < fs.Latency.Minimum {
+ fs.Latency.Minimum = fs.Latency.Current
+ }
+
+ fs.Latency.Average = (total + fs.Latency.Current) / float64(fs.Counts.Success)
+ fs.Latency.averageSquare = (totalSquare + fs.Latency.Current*fs.Latency.Current) / float64(fs.Counts.Success)
+
+ // sdv = sqrt(avg(x^2) - avg(x)^2)
+ fs.Latency.StandardDeviation = math.Sqrt(fs.Latency.averageSquare - fs.Latency.Average*fs.Latency.Average)
+}
+
+// Fail updates the FollowerStats with an unsuccessful send
+func (fs *FollowerStats) Fail() {
+ fs.Lock()
+ defer fs.Unlock()
+ fs.Counts.Fail++
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/queue.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/queue.go
new file mode 100644
index 000000000000..2c3dff3d0ffc
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/queue.go
@@ -0,0 +1,110 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2stats
+
+import (
+ "sync"
+ "time"
+)
+
+const (
+ queueCapacity = 200
+)
+
+// RequestStats represents the stats for a request.
+// It encapsulates the sending time and the size of the request.
+type RequestStats struct {
+ SendingTime time.Time
+ Size int
+}
+
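+// statsQueue is a fixed-size ring buffer holding the most recent request stats.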
+type statsQueue struct {
+ items [queueCapacity]*RequestStats
+ size int
+ front int
+ back int
+ totalReqSize int
+ rwl sync.RWMutex
+}
+
+func (q *statsQueue) Len() int {
+ return q.size
+}
+
+func (q *statsQueue) ReqSize() int {
+ return q.totalReqSize
+}
+
+// frontAndBack gets the front and back elements in the queue.
+// We must grab front and back together under the protection of the lock.
+func (q *statsQueue) frontAndBack() (*RequestStats, *RequestStats) {
+ q.rwl.RLock()
+ defer q.rwl.RUnlock()
+ if q.size != 0 {
+ return q.items[q.front], q.items[q.back]
+ }
+ return nil, nil
+}
+
+// Insert adds a RequestStats to the queue and updates the running totals.
+func (q *statsQueue) Insert(p *RequestStats) {
+ q.rwl.Lock()
+ defer q.rwl.Unlock()
+
+ q.back = (q.back + 1) % queueCapacity
+
+ if q.size == queueCapacity { //dequeue
+ q.totalReqSize -= q.items[q.front].Size
+ q.front = (q.back + 1) % queueCapacity
+ } else {
+ q.size++
+ }
+
+ q.items[q.back] = p
+ q.totalReqSize += q.items[q.back].Size
+
+}
+
+// Rate returns the request rate and byte rate per second.
+func (q *statsQueue) Rate() (float64, float64) {
+ front, back := q.frontAndBack()
+
+ if front == nil || back == nil {
+ return 0, 0
+ }
+
+ if time.Since(back.SendingTime) > time.Second {
+ q.Clear()
+ return 0, 0
+ }
+
+ sampleDuration := back.SendingTime.Sub(front.SendingTime)
+
+ pr := float64(q.Len()) / float64(sampleDuration) * float64(time.Second)
+
+ br := float64(q.ReqSize()) / float64(sampleDuration) * float64(time.Second)
+
+ return pr, br
+}
+
+// Clear resets the statsQueue to its empty state.
+func (q *statsQueue) Clear() {
+ q.rwl.Lock()
+ defer q.rwl.Unlock()
+ q.back = -1
+ q.front = 0
+ q.size = 0
+ q.totalReqSize = 0
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/server.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/server.go
new file mode 100644
index 000000000000..c4accc735197
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/server.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2stats
+
+import (
+ "encoding/json"
+ "log"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/raft"
+)
+
+// ServerStats encapsulates various statistics about an EtcdServer and its
+// communication with other members of the cluster
+type ServerStats struct {
+ serverStats
+ sync.Mutex
+}
+
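+// NewServerStats creates a ServerStats for the member with the given name and raft ID.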
+func NewServerStats(name, id string) *ServerStats {
+ ss := &ServerStats{
+ serverStats: serverStats{
+ Name: name,
+ ID: id,
+ },
+ }
+ now := time.Now()
+ ss.StartTime = now
+ ss.LeaderInfo.StartTime = now
+ ss.sendRateQueue = &statsQueue{back: -1}
+ ss.recvRateQueue = &statsQueue{back: -1}
+ return ss
+}
+
+type serverStats struct {
+ Name string `json:"name"`
+ // ID is the raft ID of the node.
+ // TODO(jonboulle): use ID instead of name?
+ ID string `json:"id"`
+ State raft.StateType `json:"state"`
+ StartTime time.Time `json:"startTime"`
+
+ LeaderInfo struct {
+ Name string `json:"leader"`
+ Uptime string `json:"uptime"`
+ StartTime time.Time `json:"startTime"`
+ } `json:"leaderInfo"`
+
+	RecvAppendRequestCnt uint64  `json:"recvAppendRequestCnt"`
+ RecvingPkgRate float64 `json:"recvPkgRate,omitempty"`
+ RecvingBandwidthRate float64 `json:"recvBandwidthRate,omitempty"`
+
+ SendAppendRequestCnt uint64 `json:"sendAppendRequestCnt"`
+ SendingPkgRate float64 `json:"sendPkgRate,omitempty"`
+ SendingBandwidthRate float64 `json:"sendBandwidthRate,omitempty"`
+
+ sendRateQueue *statsQueue
+ recvRateQueue *statsQueue
+}
+
+func (ss *ServerStats) JSON() []byte {
+ ss.Lock()
+ stats := ss.serverStats
+ stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate()
+ stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate()
+ stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String()
+ ss.Unlock()
+ b, err := json.Marshal(stats)
+ // TODO(jonboulle): appropriate error handling?
+ if err != nil {
+ log.Printf("stats: error marshalling server stats: %v", err)
+ }
+ return b
+}
+
+// RecvAppendReq updates the ServerStats in response to an AppendRequest
+// from the given leader being received
+func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) {
+ ss.Lock()
+ defer ss.Unlock()
+
+ now := time.Now()
+
+ ss.State = raft.StateFollower
+ if leader != ss.LeaderInfo.Name {
+ ss.LeaderInfo.Name = leader
+ ss.LeaderInfo.StartTime = now
+ }
+
+ ss.recvRateQueue.Insert(
+ &RequestStats{
+ SendingTime: now,
+ Size: reqSize,
+ },
+ )
+ ss.RecvAppendRequestCnt++
+}
+
+// SendAppendReq updates the ServerStats in response to an AppendRequest
+// being sent by this server
+func (ss *ServerStats) SendAppendReq(reqSize int) {
+ ss.Lock()
+ defer ss.Unlock()
+
+ ss.becomeLeader()
+
+ ss.sendRateQueue.Insert(
+ &RequestStats{
+ SendingTime: time.Now(),
+ Size: reqSize,
+ },
+ )
+
+ ss.SendAppendRequestCnt++
+}
+
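+// BecomeLeader records that this server has transitioned to the leader state.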
+func (ss *ServerStats) BecomeLeader() {
+ ss.Lock()
+ defer ss.Unlock()
+ ss.becomeLeader()
+}
+
+func (ss *ServerStats) becomeLeader() {
+ if ss.State != raft.StateLeader {
+ ss.State = raft.StateLeader
+ ss.LeaderInfo.Name = ss.ID
+ ss.LeaderInfo.StartTime = time.Now()
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/stats.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/stats.go
new file mode 100644
index 000000000000..c50a20076bde
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2stats/stats.go
@@ -0,0 +1,30 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2stats defines a standard interface for etcd cluster statistics.
+package v2stats
+
+import "github.com/coreos/pkg/capnslog"
+
+var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver/stats")
+
+type Stats interface {
+ // SelfStats returns the struct representing statistics of this server
+ SelfStats() []byte
+ // LeaderStats returns the statistics of all followers in the cluster
+ // if this server is leader. Otherwise, nil is returned.
+ LeaderStats() []byte
+ // StoreStats returns statistics of the store backing this EtcdServer
+ StoreStats() []byte
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2store/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/doc.go
new file mode 100644
index 000000000000..1933e4cd5ace
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2store defines etcd's in-memory key/value store in v2 API.
+// To be deprecated in favor of v3 storage.
+package v2store
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event.go
new file mode 100644
index 000000000000..33e901744d58
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event.go
@@ -0,0 +1,71 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+const (
+ Get = "get"
+ Create = "create"
+ Set = "set"
+ Update = "update"
+ Delete = "delete"
+ CompareAndSwap = "compareAndSwap"
+ CompareAndDelete = "compareAndDelete"
+ Expire = "expire"
+)
+
+type Event struct {
+ Action string `json:"action"`
+ Node *NodeExtern `json:"node,omitempty"`
+ PrevNode *NodeExtern `json:"prevNode,omitempty"`
+ EtcdIndex uint64 `json:"-"`
+ Refresh bool `json:"refresh,omitempty"`
+}
+
+func newEvent(action string, key string, modifiedIndex, createdIndex uint64) *Event {
+ n := &NodeExtern{
+ Key: key,
+ ModifiedIndex: modifiedIndex,
+ CreatedIndex: createdIndex,
+ }
+
+ return &Event{
+ Action: action,
+ Node: n,
+ }
+}
+
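+// IsCreated reports whether the event created a node: either an explicit
+// create, or a set with no previous node.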
+func (e *Event) IsCreated() bool {
+ if e.Action == Create {
+ return true
+ }
+ return e.Action == Set && e.PrevNode == nil
+}
+
+func (e *Event) Index() uint64 {
+ return e.Node.ModifiedIndex
+}
+
+func (e *Event) Clone() *Event {
+ return &Event{
+ Action: e.Action,
+ EtcdIndex: e.EtcdIndex,
+ Node: e.Node.Clone(),
+ PrevNode: e.PrevNode.Clone(),
+ }
+}
+
+func (e *Event) SetRefresh() {
+ e.Refresh = true
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event_history.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event_history.go
new file mode 100644
index 000000000000..e4a969f37285
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event_history.go
@@ -0,0 +1,129 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "fmt"
+ "path"
+ "strings"
+ "sync"
+
+ "go.etcd.io/etcd/etcdserver/api/v2error"
+)
+
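+// EventHistory keeps a sliding window of recent events so watchers can catch
+// up from a past index.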
+type EventHistory struct {
+ Queue eventQueue
+ StartIndex uint64
+ LastIndex uint64
+ rwl sync.RWMutex
+}
+
+func newEventHistory(capacity int) *EventHistory {
+ return &EventHistory{
+ Queue: eventQueue{
+ Capacity: capacity,
+ Events: make([]*Event, capacity),
+ },
+ }
+}
+
+// addEvent adds an event to the EventHistory and updates the index bounds.
+func (eh *EventHistory) addEvent(e *Event) *Event {
+ eh.rwl.Lock()
+ defer eh.rwl.Unlock()
+
+ eh.Queue.insert(e)
+
+ eh.LastIndex = e.Index()
+
+ eh.StartIndex = eh.Queue.Events[eh.Queue.Front].Index()
+
+ return e
+}
+
+// scan enumerates events from the event history, starting at the given index,
+// and stops at the first event whose key matches.
+func (eh *EventHistory) scan(key string, recursive bool, index uint64) (*Event, *v2error.Error) {
+ eh.rwl.RLock()
+ defer eh.rwl.RUnlock()
+
+ // index should be after the event history's StartIndex
+ if index < eh.StartIndex {
+ return nil,
+ v2error.NewError(v2error.EcodeEventIndexCleared,
+ fmt.Sprintf("the requested history has been cleared [%v/%v]",
+ eh.StartIndex, index), 0)
+ }
+
+	// the index should not be newer than the last event we have recorded
+ if index > eh.LastIndex { // future index
+ return nil, nil
+ }
+
+ offset := index - eh.StartIndex
+ i := (eh.Queue.Front + int(offset)) % eh.Queue.Capacity
+
+ for {
+ e := eh.Queue.Events[i]
+
+ if !e.Refresh {
+ ok := e.Node.Key == key
+
+ if recursive {
+				// add trailing slash
+ nkey := path.Clean(key)
+ if nkey[len(nkey)-1] != '/' {
+ nkey = nkey + "/"
+ }
+
+ ok = ok || strings.HasPrefix(e.Node.Key, nkey)
+ }
+
+ if (e.Action == Delete || e.Action == Expire) && e.PrevNode != nil && e.PrevNode.Dir {
+ ok = ok || strings.HasPrefix(key, e.PrevNode.Key)
+ }
+
+ if ok {
+ return e, nil
+ }
+ }
+
+ i = (i + 1) % eh.Queue.Capacity
+
+ if i == eh.Queue.Back {
+ return nil, nil
+ }
+ }
+}
+
+// clone is called under the stop-the-world lock, so it
+// does not need to obtain the internal lock.
+func (eh *EventHistory) clone() *EventHistory {
+ clonedQueue := eventQueue{
+ Capacity: eh.Queue.Capacity,
+ Events: make([]*Event, eh.Queue.Capacity),
+ Size: eh.Queue.Size,
+ Front: eh.Queue.Front,
+ Back: eh.Queue.Back,
+ }
+
+ copy(clonedQueue.Events, eh.Queue.Events)
+ return &EventHistory{
+ StartIndex: eh.StartIndex,
+ Queue: clonedQueue,
+ LastIndex: eh.LastIndex,
+ }
+
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event_queue.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event_queue.go
new file mode 100644
index 000000000000..7ea03de8c9a4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/event_queue.go
@@ -0,0 +1,34 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+type eventQueue struct {
+ Events []*Event
+ Size int
+ Front int
+ Back int
+ Capacity int
+}
+
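+// insert appends an event to the ring buffer, evicting the oldest event once
+// the queue is at capacity.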
+func (eq *eventQueue) insert(e *Event) {
+ eq.Events[eq.Back] = e
+ eq.Back = (eq.Back + 1) % eq.Capacity
+
+ if eq.Size == eq.Capacity { //dequeue
+ eq.Front = (eq.Front + 1) % eq.Capacity
+ } else {
+ eq.Size++
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2store/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/metrics.go
new file mode 100644
index 000000000000..5adea1efdd13
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/metrics.go
@@ -0,0 +1,130 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Set of raw Prometheus metrics.
+// Labels
+// * action = declared in event.go
+// * outcome = Outcome
+// Do not increment directly, use Report* methods.
+var (
+ readCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "reads_total",
+			Help:      "Total number of read actions (get/getRecursive), local to this member.",
+ }, []string{"action"})
+
+ writeCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "writes_total",
+ Help: "Total number of writes (e.g. set/compareAndDelete) seen by this member.",
+ }, []string{"action"})
+
+ readFailedCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "reads_failed_total",
+			Help:      "Failed read actions (get/getRecursive), local to this member.",
+ }, []string{"action"})
+
+ writeFailedCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "writes_failed_total",
+ Help: "Failed write actions (e.g. set/compareAndDelete), seen by this member.",
+ }, []string{"action"})
+
+ expireCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "expires_total",
+ Help: "Total number of expired keys.",
+ })
+
+ watchRequests = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "watch_requests_total",
+ Help: "Total number of incoming watch requests (new or reestablished).",
+ })
+
+ watcherCount = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "watchers",
+ Help: "Count of currently active watchers.",
+ })
+)
+
+const (
+ GetRecursive = "getRecursive"
+)
+
+func init() {
+ if prometheus.Register(readCounter) != nil {
+ // Tests will try to double register since the tests use both
+ // store and store_test packages; ignore second attempts.
+ return
+ }
+ prometheus.MustRegister(writeCounter)
+ prometheus.MustRegister(expireCounter)
+ prometheus.MustRegister(watchRequests)
+ prometheus.MustRegister(watcherCount)
+}
+
+func reportReadSuccess(readAction string) {
+ readCounter.WithLabelValues(readAction).Inc()
+}
+
+func reportReadFailure(readAction string) {
+ readCounter.WithLabelValues(readAction).Inc()
+ readFailedCounter.WithLabelValues(readAction).Inc()
+}
+
+func reportWriteSuccess(writeAction string) {
+ writeCounter.WithLabelValues(writeAction).Inc()
+}
+
+func reportWriteFailure(writeAction string) {
+ writeCounter.WithLabelValues(writeAction).Inc()
+ writeFailedCounter.WithLabelValues(writeAction).Inc()
+}
+
+func reportExpiredKey() {
+ expireCounter.Inc()
+}
+
+func reportWatchRequest() {
+ watchRequests.Inc()
+}
+
+func reportWatcherAdded() {
+ watcherCount.Inc()
+}
+
+func reportWatcherRemoved() {
+ watcherCount.Dec()
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2store/node.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/node.go
new file mode 100644
index 000000000000..38a6984fb6a3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/node.go
@@ -0,0 +1,396 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "path"
+ "sort"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/v2error"
+
+ "github.com/jonboulle/clockwork"
+)
+
+// explanations of Compare function result
+const (
+ CompareMatch = iota
+ CompareIndexNotMatch
+ CompareValueNotMatch
+ CompareNotMatch
+)
+
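+// Permanent is the zero time, used as the expire time of nodes that never expire.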
+var Permanent time.Time
+
+// node is the basic element in the store system.
+// A key-value pair will have a string value
+// A directory will have a children map
+type node struct {
+ Path string
+
+ CreatedIndex uint64
+ ModifiedIndex uint64
+
+ Parent *node `json:"-"` // should not encode this field! avoid circular dependency.
+
+ ExpireTime time.Time
+ Value string // for key-value pair
+ Children map[string]*node // for directory
+
+ // A reference to the store this node is attached to.
+ store *store
+}
+
+// newKV creates a Key-Value pair
+func newKV(store *store, nodePath string, value string, createdIndex uint64, parent *node, expireTime time.Time) *node {
+ return &node{
+ Path: nodePath,
+ CreatedIndex: createdIndex,
+ ModifiedIndex: createdIndex,
+ Parent: parent,
+ store: store,
+ ExpireTime: expireTime,
+ Value: value,
+ }
+}
+
+// newDir creates a directory
+func newDir(store *store, nodePath string, createdIndex uint64, parent *node, expireTime time.Time) *node {
+ return &node{
+ Path: nodePath,
+ CreatedIndex: createdIndex,
+ ModifiedIndex: createdIndex,
+ Parent: parent,
+ ExpireTime: expireTime,
+ Children: make(map[string]*node),
+ store: store,
+ }
+}
+
+// IsHidden checks whether the node is hidden. A hidden node's name
+// begins with '_', and it is not listed by a get on its parent directory.
+// For example, if we have /foo/_hidden and /foo/notHidden, get "/foo"
+// will only return /foo/notHidden.
+func (n *node) IsHidden() bool {
+ _, name := path.Split(n.Path)
+
+ return name[0] == '_'
+}
+
+// IsPermanent checks whether the node is permanent.
+func (n *node) IsPermanent() bool {
+	// an uninitialized time.Time, which equals the zero value, indicates
+	// that the node is permanent.
+ return n.ExpireTime.IsZero()
+}
+
+// IsDir reports whether the node is a directory.
+func (n *node) IsDir() bool {
+ return n.Children != nil
+}
+
+// Read function gets the value of the node.
+// If the receiver node is not a key-value pair, a "Not A File" error will be returned.
+func (n *node) Read() (string, *v2error.Error) {
+ if n.IsDir() {
+ return "", v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex)
+ }
+
+ return n.Value, nil
+}
+
+// Write sets the value of the node to the given value.
+// If the receiver node is a directory, a "Not A File" error will be returned.
+func (n *node) Write(value string, index uint64) *v2error.Error {
+ if n.IsDir() {
+ return v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex)
+ }
+
+ n.Value = value
+ n.ModifiedIndex = index
+
+ return nil
+}
+
+func (n *node) expirationAndTTL(clock clockwork.Clock) (*time.Time, int64) {
+ if !n.IsPermanent() {
+ /* compute ttl as:
+ ceiling( (expireTime - timeNow) / nanosecondsPerSecond )
+ which ranges from 1..n
+ rather than as:
+ ( (expireTime - timeNow) / nanosecondsPerSecond ) + 1
+ which ranges 1..n+1
+ */
+ ttlN := n.ExpireTime.Sub(clock.Now())
+ ttl := ttlN / time.Second
+ if (ttlN % time.Second) > 0 {
+ ttl++
+ }
+ t := n.ExpireTime.UTC()
+ return &t, int64(ttl)
+ }
+ return nil, 0
+}
+
+// List returns a slice of the nodes under the receiver node.
+// If the receiver node is not a directory, a "Not A Directory" error will be returned.
+func (n *node) List() ([]*node, *v2error.Error) {
+ if !n.IsDir() {
+ return nil, v2error.NewError(v2error.EcodeNotDir, "", n.store.CurrentIndex)
+ }
+
+ nodes := make([]*node, len(n.Children))
+
+ i := 0
+ for _, node := range n.Children {
+ nodes[i] = node
+ i++
+ }
+
+ return nodes, nil
+}
+
+// GetChild function returns the child node under the directory node.
+// On success, it returns the file node
+func (n *node) GetChild(name string) (*node, *v2error.Error) {
+ if !n.IsDir() {
+ return nil, v2error.NewError(v2error.EcodeNotDir, n.Path, n.store.CurrentIndex)
+ }
+
+ child, ok := n.Children[name]
+
+ if ok {
+ return child, nil
+ }
+
+ return nil, nil
+}
+
+// Add function adds a node to the receiver node.
+// If the receiver is not a directory, a "Not A Directory" error will be returned.
+// If there is an existing node with the same name under the directory, an "Already Exist"
+// error will be returned.
+func (n *node) Add(child *node) *v2error.Error {
+ if !n.IsDir() {
+ return v2error.NewError(v2error.EcodeNotDir, "", n.store.CurrentIndex)
+ }
+
+ _, name := path.Split(child.Path)
+
+ if _, ok := n.Children[name]; ok {
+ return v2error.NewError(v2error.EcodeNodeExist, "", n.store.CurrentIndex)
+ }
+
+ n.Children[name] = child
+
+ return nil
+}
+
+// Remove removes the node.
+func (n *node) Remove(dir, recursive bool, callback func(path string)) *v2error.Error {
+ if !n.IsDir() { // key-value pair
+ _, name := path.Split(n.Path)
+
+ // find its parent and remove the node from the map
+ if n.Parent != nil && n.Parent.Children[name] == n {
+ delete(n.Parent.Children, name)
+ }
+
+ if callback != nil {
+ callback(n.Path)
+ }
+
+ if !n.IsPermanent() {
+ n.store.ttlKeyHeap.remove(n)
+ }
+
+ return nil
+ }
+
+ if !dir {
+ // cannot delete a directory without dir set to true
+ return v2error.NewError(v2error.EcodeNotFile, n.Path, n.store.CurrentIndex)
+ }
+
+ if len(n.Children) != 0 && !recursive {
+ // cannot delete a directory if it is not empty and the operation
+ // is not recursive
+ return v2error.NewError(v2error.EcodeDirNotEmpty, n.Path, n.store.CurrentIndex)
+ }
+
+ for _, child := range n.Children { // delete all children
+ child.Remove(true, true, callback)
+ }
+
+ // delete self
+ _, name := path.Split(n.Path)
+ if n.Parent != nil && n.Parent.Children[name] == n {
+ delete(n.Parent.Children, name)
+
+ if callback != nil {
+ callback(n.Path)
+ }
+
+ if !n.IsPermanent() {
+ n.store.ttlKeyHeap.remove(n)
+ }
+ }
+
+ return nil
+}
+
+func (n *node) Repr(recursive, sorted bool, clock clockwork.Clock) *NodeExtern {
+ if n.IsDir() {
+ node := &NodeExtern{
+ Key: n.Path,
+ Dir: true,
+ ModifiedIndex: n.ModifiedIndex,
+ CreatedIndex: n.CreatedIndex,
+ }
+ node.Expiration, node.TTL = n.expirationAndTTL(clock)
+
+ if !recursive {
+ return node
+ }
+
+ children, _ := n.List()
+ node.Nodes = make(NodeExterns, len(children))
+
+ // we do not use the index in the children slice directly
+ // we need to skip the hidden one
+ i := 0
+
+ for _, child := range children {
+
+ if child.IsHidden() { // get will not list hidden node
+ continue
+ }
+
+ node.Nodes[i] = child.Repr(recursive, sorted, clock)
+
+ i++
+ }
+
+ // eliminate hidden nodes
+ node.Nodes = node.Nodes[:i]
+ if sorted {
+ sort.Sort(node.Nodes)
+ }
+
+ return node
+ }
+
+	// n.Value could be changed later, so we need to copy the value out
+ value := n.Value
+ node := &NodeExtern{
+ Key: n.Path,
+ Value: &value,
+ ModifiedIndex: n.ModifiedIndex,
+ CreatedIndex: n.CreatedIndex,
+ }
+ node.Expiration, node.TTL = n.expirationAndTTL(clock)
+ return node
+}
+
+func (n *node) UpdateTTL(expireTime time.Time) {
+ if !n.IsPermanent() {
+ if expireTime.IsZero() {
+ // from ttl to permanent
+ n.ExpireTime = expireTime
+ // remove from ttl heap
+ n.store.ttlKeyHeap.remove(n)
+ return
+ }
+
+ // update ttl
+ n.ExpireTime = expireTime
+ // update ttl heap
+ n.store.ttlKeyHeap.update(n)
+ return
+ }
+
+ if expireTime.IsZero() {
+ return
+ }
+
+ // from permanent to ttl
+ n.ExpireTime = expireTime
+ // push into ttl heap
+ n.store.ttlKeyHeap.push(n)
+}
+
+// Compare compares the node's index and value with the provided ones.
+// The second return value explains the result and equals one of the Compare* constants.
+func (n *node) Compare(prevValue string, prevIndex uint64) (ok bool, which int) {
+ indexMatch := prevIndex == 0 || n.ModifiedIndex == prevIndex
+ valueMatch := prevValue == "" || n.Value == prevValue
+ ok = valueMatch && indexMatch
+ switch {
+ case valueMatch && indexMatch:
+ which = CompareMatch
+ case indexMatch && !valueMatch:
+ which = CompareValueNotMatch
+ case valueMatch && !indexMatch:
+ which = CompareIndexNotMatch
+ default:
+ which = CompareNotMatch
+ }
+ return ok, which
+}
+
+// Clone clones the node recursively and returns the new node.
+// If the node is a directory, it will clone all the content under this directory.
+// If the node is a key-value pair, it will clone the pair.
+func (n *node) Clone() *node {
+ if !n.IsDir() {
+ newkv := newKV(n.store, n.Path, n.Value, n.CreatedIndex, n.Parent, n.ExpireTime)
+ newkv.ModifiedIndex = n.ModifiedIndex
+ return newkv
+ }
+
+ clone := newDir(n.store, n.Path, n.CreatedIndex, n.Parent, n.ExpireTime)
+ clone.ModifiedIndex = n.ModifiedIndex
+
+ for key, child := range n.Children {
+ clone.Children[key] = child.Clone()
+ }
+
+ return clone
+}
+
+// recoverAndclean helps to do recovery. Two things need to be done: 1. recover the structure; 2. delete expired nodes.
+// Two things need to be done: 1. recovery structure; 2. delete expired nodes
+//
+// If the node is a directory, it will help recover children's parent pointer and recursively
+// call this function on its children.
+// We check the expire last since we need to recover the whole structure first and add all the
+// notifications into the event history.
+func (n *node) recoverAndclean() {
+ if n.IsDir() {
+ for _, child := range n.Children {
+ child.Parent = n
+ child.store = n.store
+ child.recoverAndclean()
+ }
+ }
+
+ if !n.ExpireTime.IsZero() {
+ n.store.ttlKeyHeap.push(n)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2store/node_extern.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/node_extern.go
new file mode 100644
index 000000000000..b3bf5f3c9768
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/node_extern.go
@@ -0,0 +1,116 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "sort"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+)
+
+// NodeExtern is the external representation of the
+// internal node, with additional fields:
+// PrevValue is the previous value of the node,
+// and TTL is the time to live in seconds.
+type NodeExtern struct {
+ Key string `json:"key,omitempty"`
+ Value *string `json:"value,omitempty"`
+ Dir bool `json:"dir,omitempty"`
+ Expiration *time.Time `json:"expiration,omitempty"`
+ TTL int64 `json:"ttl,omitempty"`
+ Nodes NodeExterns `json:"nodes,omitempty"`
+ ModifiedIndex uint64 `json:"modifiedIndex,omitempty"`
+ CreatedIndex uint64 `json:"createdIndex,omitempty"`
+}
+
+func (eNode *NodeExtern) loadInternalNode(n *node, recursive, sorted bool, clock clockwork.Clock) {
+ if n.IsDir() { // node is a directory
+ eNode.Dir = true
+
+ children, _ := n.List()
+ eNode.Nodes = make(NodeExterns, len(children))
+
+ // we do not use the index in the children slice directly
+ // we need to skip the hidden one
+ i := 0
+
+ for _, child := range children {
+ if child.IsHidden() { // get will not return hidden nodes
+ continue
+ }
+
+ eNode.Nodes[i] = child.Repr(recursive, sorted, clock)
+ i++
+ }
+
+ // eliminate hidden nodes
+ eNode.Nodes = eNode.Nodes[:i]
+
+ if sorted {
+ sort.Sort(eNode.Nodes)
+ }
+
+ } else { // node is a file
+ value, _ := n.Read()
+ eNode.Value = &value
+ }
+
+ eNode.Expiration, eNode.TTL = n.expirationAndTTL(clock)
+}
+
+func (eNode *NodeExtern) Clone() *NodeExtern {
+ if eNode == nil {
+ return nil
+ }
+ nn := &NodeExtern{
+ Key: eNode.Key,
+ Dir: eNode.Dir,
+ TTL: eNode.TTL,
+ ModifiedIndex: eNode.ModifiedIndex,
+ CreatedIndex: eNode.CreatedIndex,
+ }
+ if eNode.Value != nil {
+ s := *eNode.Value
+ nn.Value = &s
+ }
+ if eNode.Expiration != nil {
+ t := *eNode.Expiration
+ nn.Expiration = &t
+ }
+ if eNode.Nodes != nil {
+ nn.Nodes = make(NodeExterns, len(eNode.Nodes))
+ for i, n := range eNode.Nodes {
+ nn.Nodes[i] = n.Clone()
+ }
+ }
+ return nn
+}
+
+type NodeExterns []*NodeExtern
+
+// interfaces for sorting
+
+func (ns NodeExterns) Len() int {
+ return len(ns)
+}
+
+func (ns NodeExterns) Less(i, j int) bool {
+ return ns[i].Key < ns[j].Key
+}
+
+func (ns NodeExterns) Swap(i, j int) {
+ ns[i], ns[j] = ns[j], ns[i]
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2store/stats.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/stats.go
new file mode 100644
index 000000000000..45bc97f01ba4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/stats.go
@@ -0,0 +1,145 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "encoding/json"
+ "sync/atomic"
+)
+
+const (
+ SetSuccess = iota
+ SetFail
+ DeleteSuccess
+ DeleteFail
+ CreateSuccess
+ CreateFail
+ UpdateSuccess
+ UpdateFail
+ CompareAndSwapSuccess
+ CompareAndSwapFail
+ GetSuccess
+ GetFail
+ ExpireCount
+ CompareAndDeleteSuccess
+ CompareAndDeleteFail
+)
+
+type Stats struct {
+ // Number of get requests
+
+ GetSuccess uint64 `json:"getsSuccess"`
+ GetFail uint64 `json:"getsFail"`
+
+ // Number of sets requests
+
+ SetSuccess uint64 `json:"setsSuccess"`
+ SetFail uint64 `json:"setsFail"`
+
+ // Number of delete requests
+
+ DeleteSuccess uint64 `json:"deleteSuccess"`
+ DeleteFail uint64 `json:"deleteFail"`
+
+ // Number of update requests
+
+ UpdateSuccess uint64 `json:"updateSuccess"`
+ UpdateFail uint64 `json:"updateFail"`
+
+ // Number of create requests
+
+ CreateSuccess uint64 `json:"createSuccess"`
+ CreateFail uint64 `json:"createFail"`
+
+ // Number of testAndSet requests
+
+ CompareAndSwapSuccess uint64 `json:"compareAndSwapSuccess"`
+ CompareAndSwapFail uint64 `json:"compareAndSwapFail"`
+
+ // Number of compareAndDelete requests
+
+ CompareAndDeleteSuccess uint64 `json:"compareAndDeleteSuccess"`
+ CompareAndDeleteFail uint64 `json:"compareAndDeleteFail"`
+
+ ExpireCount uint64 `json:"expireCount"`
+
+ Watchers uint64 `json:"watchers"`
+}
+
+func newStats() *Stats {
+ s := new(Stats)
+ return s
+}
+
+func (s *Stats) clone() *Stats {
+ return &Stats{
+ GetSuccess: s.GetSuccess,
+ GetFail: s.GetFail,
+ SetSuccess: s.SetSuccess,
+ SetFail: s.SetFail,
+ DeleteSuccess: s.DeleteSuccess,
+ DeleteFail: s.DeleteFail,
+ UpdateSuccess: s.UpdateSuccess,
+ UpdateFail: s.UpdateFail,
+ CreateSuccess: s.CreateSuccess,
+ CreateFail: s.CreateFail,
+ CompareAndSwapSuccess: s.CompareAndSwapSuccess,
+ CompareAndSwapFail: s.CompareAndSwapFail,
+ CompareAndDeleteSuccess: s.CompareAndDeleteSuccess,
+ CompareAndDeleteFail: s.CompareAndDeleteFail,
+ ExpireCount: s.ExpireCount,
+ Watchers: s.Watchers,
+ }
+}
+
+func (s *Stats) toJson() []byte {
+ b, _ := json.Marshal(s)
+ return b
+}
+
+func (s *Stats) Inc(field int) {
+ switch field {
+ case SetSuccess:
+ atomic.AddUint64(&s.SetSuccess, 1)
+ case SetFail:
+ atomic.AddUint64(&s.SetFail, 1)
+ case CreateSuccess:
+ atomic.AddUint64(&s.CreateSuccess, 1)
+ case CreateFail:
+ atomic.AddUint64(&s.CreateFail, 1)
+ case DeleteSuccess:
+ atomic.AddUint64(&s.DeleteSuccess, 1)
+ case DeleteFail:
+ atomic.AddUint64(&s.DeleteFail, 1)
+ case GetSuccess:
+ atomic.AddUint64(&s.GetSuccess, 1)
+ case GetFail:
+ atomic.AddUint64(&s.GetFail, 1)
+ case UpdateSuccess:
+ atomic.AddUint64(&s.UpdateSuccess, 1)
+ case UpdateFail:
+ atomic.AddUint64(&s.UpdateFail, 1)
+ case CompareAndSwapSuccess:
+ atomic.AddUint64(&s.CompareAndSwapSuccess, 1)
+ case CompareAndSwapFail:
+ atomic.AddUint64(&s.CompareAndSwapFail, 1)
+ case CompareAndDeleteSuccess:
+ atomic.AddUint64(&s.CompareAndDeleteSuccess, 1)
+ case CompareAndDeleteFail:
+ atomic.AddUint64(&s.CompareAndDeleteFail, 1)
+ case ExpireCount:
+ atomic.AddUint64(&s.ExpireCount, 1)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2store/store.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/store.go
new file mode 100644
index 000000000000..ce940436eb70
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/store.go
@@ -0,0 +1,791 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "encoding/json"
+ "fmt"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/v2error"
+ "go.etcd.io/etcd/pkg/types"
+
+ "github.com/jonboulle/clockwork"
+)
+
+// The default version to set when the store is first initialized.
+const defaultVersion = 2
+
+var minExpireTime time.Time
+
+func init() {
+ minExpireTime, _ = time.Parse(time.RFC3339, "2000-01-01T00:00:00Z")
+}
+
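+// Store is the interface implemented by the v2 in-memory key/value store.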
+type Store interface {
+ Version() int
+ Index() uint64
+
+ Get(nodePath string, recursive, sorted bool) (*Event, error)
+ Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error)
+ Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error)
+ Create(nodePath string, dir bool, value string, unique bool,
+ expireOpts TTLOptionSet) (*Event, error)
+ CompareAndSwap(nodePath string, prevValue string, prevIndex uint64,
+ value string, expireOpts TTLOptionSet) (*Event, error)
+ Delete(nodePath string, dir, recursive bool) (*Event, error)
+ CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error)
+
+ Watch(prefix string, recursive, stream bool, sinceIndex uint64) (Watcher, error)
+
+ Save() ([]byte, error)
+ Recovery(state []byte) error
+
+ Clone() Store
+ SaveNoCopy() ([]byte, error)
+
+ JsonStats() []byte
+ DeleteExpiredKeys(cutoff time.Time)
+
+ HasTTLKeys() bool
+}
+
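+// TTLOptionSet carries the expiration options for a write; when Refresh is
+// set, the TTL is updated without changing the stored value.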
+type TTLOptionSet struct {
+ ExpireTime time.Time
+ Refresh bool
+}
+
+type store struct {
+ Root *node
+ WatcherHub *watcherHub
+ CurrentIndex uint64
+ Stats *Stats
+ CurrentVersion int
+	ttlKeyHeap   *ttlKeyHeap  // needs to be recovered manually
+	worldLock    sync.RWMutex // stop-the-world lock
+ clock clockwork.Clock
+ readonlySet types.Set
+}
+
+// New creates a store where the given namespaces will be created as initial directories.
+func New(namespaces ...string) Store {
+ s := newStore(namespaces...)
+ s.clock = clockwork.NewRealClock()
+ return s
+}
+
+func newStore(namespaces ...string) *store {
+ s := new(store)
+ s.CurrentVersion = defaultVersion
+ s.Root = newDir(s, "/", s.CurrentIndex, nil, Permanent)
+ for _, namespace := range namespaces {
+ s.Root.Add(newDir(s, namespace, s.CurrentIndex, s.Root, Permanent))
+ }
+ s.Stats = newStats()
+ s.WatcherHub = newWatchHub(1000)
+ s.ttlKeyHeap = newTtlKeyHeap()
+ s.readonlySet = types.NewUnsafeSet(append(namespaces, "/")...)
+ return s
+}
+
+// Version retrieves current version of the store.
+func (s *store) Version() int {
+ return s.CurrentVersion
+}
+
+// Index retrieves the current index of the store.
+func (s *store) Index() uint64 {
+ s.worldLock.RLock()
+ defer s.worldLock.RUnlock()
+ return s.CurrentIndex
+}
+
+// Get returns a get event.
+// If recursive is true, it will return all the content under the node path.
+// If sorted is true, it will sort the content by keys.
+func (s *store) Get(nodePath string, recursive, sorted bool) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.RLock()
+ defer s.worldLock.RUnlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(GetSuccess)
+ if recursive {
+ reportReadSuccess(GetRecursive)
+ } else {
+ reportReadSuccess(Get)
+ }
+ return
+ }
+
+ s.Stats.Inc(GetFail)
+ if recursive {
+ reportReadFailure(GetRecursive)
+ } else {
+ reportReadFailure(Get)
+ }
+ }()
+
+ n, err := s.internalGet(nodePath)
+ if err != nil {
+ return nil, err
+ }
+
+ e := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex)
+ e.EtcdIndex = s.CurrentIndex
+ e.Node.loadInternalNode(n, recursive, sorted, s.clock)
+
+ return e, nil
+}
+
+// Create creates the node at nodePath, creating any missing intermediate directories with no TTL.
+// If the node already exists, Create will fail.
+// If any node on the path is a file, Create will fail.
+func (s *store) Create(nodePath string, dir bool, value string, unique bool, expireOpts TTLOptionSet) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(CreateSuccess)
+ reportWriteSuccess(Create)
+ return
+ }
+
+ s.Stats.Inc(CreateFail)
+ reportWriteFailure(Create)
+ }()
+
+ e, err := s.internalCreate(nodePath, dir, value, unique, false, expireOpts.ExpireTime, Create)
+ if err != nil {
+ return nil, err
+ }
+
+ e.EtcdIndex = s.CurrentIndex
+ s.WatcherHub.notify(e)
+
+ return e, nil
+}
+
+// Set creates or replaces the node at nodePath.
+func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(SetSuccess)
+ reportWriteSuccess(Set)
+ return
+ }
+
+ s.Stats.Inc(SetFail)
+ reportWriteFailure(Set)
+ }()
+
+ // Get prevNode value
+ n, getErr := s.internalGet(nodePath)
+ if getErr != nil && getErr.ErrorCode != v2error.EcodeKeyNotFound {
+ err = getErr
+ return nil, err
+ }
+
+ if expireOpts.Refresh {
+ if getErr != nil {
+ err = getErr
+ return nil, err
+ }
+ value = n.Value
+ }
+
+ // Set new value
+ e, err := s.internalCreate(nodePath, dir, value, false, true, expireOpts.ExpireTime, Set)
+ if err != nil {
+ return nil, err
+ }
+ e.EtcdIndex = s.CurrentIndex
+
+ // Put prevNode into event
+ if getErr == nil {
+ prev := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex)
+ prev.Node.loadInternalNode(n, false, false, s.clock)
+ e.PrevNode = prev.Node
+ }
+
+ if !expireOpts.Refresh {
+ s.WatcherHub.notify(e)
+ } else {
+ e.SetRefresh()
+ s.WatcherHub.add(e)
+ }
+
+ return e, nil
+}
+
+// getCompareFailCause returns a user-readable cause of a failed comparison.
+func getCompareFailCause(n *node, which int, prevValue string, prevIndex uint64) string {
+ switch which {
+ case CompareIndexNotMatch:
+ return fmt.Sprintf("[%v != %v]", prevIndex, n.ModifiedIndex)
+ case CompareValueNotMatch:
+ return fmt.Sprintf("[%v != %v]", prevValue, n.Value)
+ default:
+ return fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, n.Value, prevIndex, n.ModifiedIndex)
+ }
+}
+
+func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64,
+ value string, expireOpts TTLOptionSet) (*Event, error) {
+
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(CompareAndSwapSuccess)
+ reportWriteSuccess(CompareAndSwap)
+ return
+ }
+
+ s.Stats.Inc(CompareAndSwapFail)
+ reportWriteFailure(CompareAndSwap)
+ }()
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+ // we do not allow the user to change "/"
+ if s.readonlySet.Contains(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex)
+ }
+
+ n, err := s.internalGet(nodePath)
+ if err != nil {
+ return nil, err
+ }
+ if n.IsDir() { // can only compare and swap file
+ err = v2error.NewError(v2error.EcodeNotFile, nodePath, s.CurrentIndex)
+ return nil, err
+ }
+
+	// If both prevValue and prevIndex are given, we test both of them.
+	// The command is executed only if both tests succeed.
+ if ok, which := n.Compare(prevValue, prevIndex); !ok {
+ cause := getCompareFailCause(n, which, prevValue, prevIndex)
+ err = v2error.NewError(v2error.EcodeTestFailed, cause, s.CurrentIndex)
+ return nil, err
+ }
+
+ if expireOpts.Refresh {
+ value = n.Value
+ }
+
+ // update etcd index
+ s.CurrentIndex++
+
+ e := newEvent(CompareAndSwap, nodePath, s.CurrentIndex, n.CreatedIndex)
+ e.EtcdIndex = s.CurrentIndex
+ e.PrevNode = n.Repr(false, false, s.clock)
+ eNode := e.Node
+
+ // if test succeed, write the value
+ n.Write(value, s.CurrentIndex)
+ n.UpdateTTL(expireOpts.ExpireTime)
+
+ // copy the value for safety
+ valueCopy := value
+ eNode.Value = &valueCopy
+ eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
+
+ if !expireOpts.Refresh {
+ s.WatcherHub.notify(e)
+ } else {
+ e.SetRefresh()
+ s.WatcherHub.add(e)
+ }
+
+ return e, nil
+}
+
+// Delete deletes the node at the given path.
+// If the node is a directory, recursive must be true to delete it.
+func (s *store) Delete(nodePath string, dir, recursive bool) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(DeleteSuccess)
+ reportWriteSuccess(Delete)
+ return
+ }
+
+ s.Stats.Inc(DeleteFail)
+ reportWriteFailure(Delete)
+ }()
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+ // we do not allow the user to change "/"
+ if s.readonlySet.Contains(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex)
+ }
+
+ // recursive implies dir
+ if recursive {
+ dir = true
+ }
+
+ n, err := s.internalGet(nodePath)
+ if err != nil { // if the node does not exist, return error
+ return nil, err
+ }
+
+ nextIndex := s.CurrentIndex + 1
+ e := newEvent(Delete, nodePath, nextIndex, n.CreatedIndex)
+ e.EtcdIndex = nextIndex
+ e.PrevNode = n.Repr(false, false, s.clock)
+ eNode := e.Node
+
+ if n.IsDir() {
+ eNode.Dir = true
+ }
+
+ callback := func(path string) { // notify function
+ // notify the watchers with deleted set true
+ s.WatcherHub.notifyWatchers(e, path, true)
+ }
+
+ err = n.Remove(dir, recursive, callback)
+ if err != nil {
+ return nil, err
+ }
+
+ // update etcd index
+ s.CurrentIndex++
+
+ s.WatcherHub.notify(e)
+
+ return e, nil
+}
+
+func (s *store) CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(CompareAndDeleteSuccess)
+ reportWriteSuccess(CompareAndDelete)
+ return
+ }
+
+ s.Stats.Inc(CompareAndDeleteFail)
+ reportWriteFailure(CompareAndDelete)
+ }()
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+
+ n, err := s.internalGet(nodePath)
+ if err != nil { // if the node does not exist, return error
+ return nil, err
+ }
+ if n.IsDir() { // can only compare and delete file
+ return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, s.CurrentIndex)
+ }
+
+	// If both prevValue and prevIndex are given, we test both of them.
+	// The command is executed only if both tests succeed.
+ if ok, which := n.Compare(prevValue, prevIndex); !ok {
+ cause := getCompareFailCause(n, which, prevValue, prevIndex)
+ return nil, v2error.NewError(v2error.EcodeTestFailed, cause, s.CurrentIndex)
+ }
+
+ // update etcd index
+ s.CurrentIndex++
+
+ e := newEvent(CompareAndDelete, nodePath, s.CurrentIndex, n.CreatedIndex)
+ e.EtcdIndex = s.CurrentIndex
+ e.PrevNode = n.Repr(false, false, s.clock)
+
+ callback := func(path string) { // notify function
+ // notify the watchers with deleted set true
+ s.WatcherHub.notifyWatchers(e, path, true)
+ }
+
+ err = n.Remove(false, false, callback)
+ if err != nil {
+ return nil, err
+ }
+
+ s.WatcherHub.notify(e)
+
+ return e, nil
+}
+
+func (s *store) Watch(key string, recursive, stream bool, sinceIndex uint64) (Watcher, error) {
+ s.worldLock.RLock()
+ defer s.worldLock.RUnlock()
+
+ key = path.Clean(path.Join("/", key))
+ if sinceIndex == 0 {
+ sinceIndex = s.CurrentIndex + 1
+ }
+ // WatcherHub does not know about the current index, so we need to pass it in
+ w, err := s.WatcherHub.watch(key, recursive, stream, sinceIndex, s.CurrentIndex)
+ if err != nil {
+ return nil, err
+ }
+
+ return w, nil
+}
+
+// walk walks the nodePath and applies walkFunc to each directory along it.
+func (s *store) walk(nodePath string, walkFunc func(prev *node, component string) (*node, *v2error.Error)) (*node, *v2error.Error) {
+ components := strings.Split(nodePath, "/")
+
+ curr := s.Root
+ var err *v2error.Error
+
+ for i := 1; i < len(components); i++ {
+ if len(components[i]) == 0 { // ignore empty string
+ return curr, nil
+ }
+
+ curr, err = walkFunc(curr, components[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return curr, nil
+}
+
+// Update updates the value/ttl of the node.
+// If the node is a file, the value and the ttl can be updated.
+// If the node is a directory, only the ttl can be updated.
+func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(UpdateSuccess)
+ reportWriteSuccess(Update)
+ return
+ }
+
+ s.Stats.Inc(UpdateFail)
+ reportWriteFailure(Update)
+ }()
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+ // we do not allow the user to change "/"
+ if s.readonlySet.Contains(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex)
+ }
+
+ currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1
+
+ n, err := s.internalGet(nodePath)
+ if err != nil { // if the node does not exist, return error
+ return nil, err
+ }
+ if n.IsDir() && len(newValue) != 0 {
+ // if the node is a directory, we cannot update its value to a non-empty string
+ return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, currIndex)
+ }
+
+ if expireOpts.Refresh {
+ newValue = n.Value
+ }
+
+ e := newEvent(Update, nodePath, nextIndex, n.CreatedIndex)
+ e.EtcdIndex = nextIndex
+ e.PrevNode = n.Repr(false, false, s.clock)
+ eNode := e.Node
+
+ n.Write(newValue, nextIndex)
+
+ if n.IsDir() {
+ eNode.Dir = true
+ } else {
+ // copy the value for safety
+ newValueCopy := newValue
+ eNode.Value = &newValueCopy
+ }
+
+ // update ttl
+ n.UpdateTTL(expireOpts.ExpireTime)
+
+ eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
+
+ if !expireOpts.Refresh {
+ s.WatcherHub.notify(e)
+ } else {
+ e.SetRefresh()
+ s.WatcherHub.add(e)
+ }
+
+ s.CurrentIndex = nextIndex
+
+ return e, nil
+}
+
+func (s *store) internalCreate(nodePath string, dir bool, value string, unique, replace bool,
+ expireTime time.Time, action string) (*Event, *v2error.Error) {
+
+ currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1
+
+ if unique { // append unique item under the node path
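+ // the index is zero-padded to 20 digits so that lexicographic key order matches numeric creation order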
+ nodePath += "/" + fmt.Sprintf("%020s", strconv.FormatUint(nextIndex, 10))
+ }
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+
+ // we do not allow the user to change "/"
+ if s.readonlySet.Contains(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, "/", currIndex)
+ }
+
+ // Assume expire times that are way in the past are invalid.
+ // This can occur when the time is serialized to JSON.
+ if expireTime.Before(minExpireTime) {
+ expireTime = Permanent
+ }
+
+ dirName, nodeName := path.Split(nodePath)
+
+ // walk through the nodePath, create dirs and get the last directory node
+ d, err := s.walk(dirName, s.checkDir)
+
+ if err != nil {
+ s.Stats.Inc(SetFail)
+ reportWriteFailure(action)
+ err.Index = currIndex
+ return nil, err
+ }
+
+ e := newEvent(action, nodePath, nextIndex, nextIndex)
+ eNode := e.Node
+
+ n, _ := d.GetChild(nodeName)
+
+ // replace will try to replace an existing file
+ if n != nil {
+ if replace {
+ if n.IsDir() {
+ return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, currIndex)
+ }
+ e.PrevNode = n.Repr(false, false, s.clock)
+
+ n.Remove(false, false, nil)
+ } else {
+ return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, currIndex)
+ }
+ }
+
+ if !dir { // create file
+ // copy the value for safety
+ valueCopy := value
+ eNode.Value = &valueCopy
+
+ n = newKV(s, nodePath, value, nextIndex, d, expireTime)
+
+ } else { // create directory
+ eNode.Dir = true
+
+ n = newDir(s, nodePath, nextIndex, d, expireTime)
+ }
+
+ // we are sure d is a directory and does not have a child with name n.Name
+ d.Add(n)
+
+ // node with TTL
+ if !n.IsPermanent() {
+ s.ttlKeyHeap.push(n)
+
+ eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
+ }
+
+ s.CurrentIndex = nextIndex
+
+ return e, nil
+}
+
+// internalGet gets the node of the given nodePath.
+func (s *store) internalGet(nodePath string) (*node, *v2error.Error) {
+ nodePath = path.Clean(path.Join("/", nodePath))
+
+ walkFunc := func(parent *node, name string) (*node, *v2error.Error) {
+
+ if !parent.IsDir() {
+ err := v2error.NewError(v2error.EcodeNotDir, parent.Path, s.CurrentIndex)
+ return nil, err
+ }
+
+ child, ok := parent.Children[name]
+ if ok {
+ return child, nil
+ }
+
+ return nil, v2error.NewError(v2error.EcodeKeyNotFound, path.Join(parent.Path, name), s.CurrentIndex)
+ }
+
+ f, err := s.walk(nodePath, walkFunc)
+
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// DeleteExpiredKeys will delete all expired keys
+func (s *store) DeleteExpiredKeys(cutoff time.Time) {
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
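+ // pop expired nodes in expiration order until the top of the heap expires after the cutoff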
+ for {
+ node := s.ttlKeyHeap.top()
+ if node == nil || node.ExpireTime.After(cutoff) {
+ break
+ }
+
+ s.CurrentIndex++
+ e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex)
+ e.EtcdIndex = s.CurrentIndex
+ e.PrevNode = node.Repr(false, false, s.clock)
+ if node.IsDir() {
+ e.Node.Dir = true
+ }
+
+ callback := func(path string) { // notify function
+ // notify the watchers with deleted set true
+ s.WatcherHub.notifyWatchers(e, path, true)
+ }
+
+ s.ttlKeyHeap.pop()
+ node.Remove(true, true, callback)
+
+ reportExpiredKey()
+ s.Stats.Inc(ExpireCount)
+
+ s.WatcherHub.notify(e)
+ }
+
+}
+
+// checkDir will check whether the component is a directory under the parent node.
+// If it is a directory, this function will return the pointer to that node.
+// If it does not exist, this function will create a new directory and return the pointer to that node.
+// If it is a file, this function will return an error.
+func (s *store) checkDir(parent *node, dirName string) (*node, *v2error.Error) {
+ node, ok := parent.Children[dirName]
+
+ if ok {
+ if node.IsDir() {
+ return node, nil
+ }
+
+ return nil, v2error.NewError(v2error.EcodeNotDir, node.Path, s.CurrentIndex)
+ }
+
+ n := newDir(s, path.Join(parent.Path, dirName), s.CurrentIndex+1, parent, Permanent)
+
+ parent.Children[dirName] = n
+
+ return n, nil
+}
+
+// Save saves the static state of the store system.
+// It does not save the state of the watchers.
+// It does not save the parent field of the nodes; otherwise
+// there would be a cyclic dependency issue for the json package.
+func (s *store) Save() ([]byte, error) {
+ b, err := json.Marshal(s.Clone())
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+func (s *store) SaveNoCopy() ([]byte, error) {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+func (s *store) Clone() Store {
+ s.worldLock.Lock()
+
+ clonedStore := newStore()
+ clonedStore.CurrentIndex = s.CurrentIndex
+ clonedStore.Root = s.Root.Clone()
+ clonedStore.WatcherHub = s.WatcherHub.clone()
+ clonedStore.Stats = s.Stats.clone()
+ clonedStore.CurrentVersion = s.CurrentVersion
+
+ s.worldLock.Unlock()
+ return clonedStore
+}
+
+// Recovery recovers the store system from a static state.
+// It needs to recover the parent field of the nodes,
+// delete any nodes that expired since the state was saved,
+// and create the monitoring goroutines.
+func (s *store) Recovery(state []byte) error {
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+ err := json.Unmarshal(state, s)
+
+ if err != nil {
+ return err
+ }
+
+ s.ttlKeyHeap = newTtlKeyHeap()
+
+ s.Root.recoverAndclean()
+ return nil
+}
+
+func (s *store) JsonStats() []byte {
+ s.Stats.Watchers = uint64(s.WatcherHub.count)
+ return s.Stats.toJson()
+}
+
+func (s *store) HasTTLKeys() bool {
+ s.worldLock.RLock()
+ defer s.worldLock.RUnlock()
+ return s.ttlKeyHeap.Len() != 0
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2store/ttl_key_heap.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/ttl_key_heap.go
new file mode 100644
index 000000000000..477d2b9f3aa1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/ttl_key_heap.go
@@ -0,0 +1,97 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import "container/heap"
+
+// A ttlKeyHeap is a min-heap of TTL keys ordered by expiration time.
+type ttlKeyHeap struct {
+ array []*node
+ keyMap map[*node]int
+}
+
+func newTtlKeyHeap() *ttlKeyHeap {
+ h := &ttlKeyHeap{keyMap: make(map[*node]int)}
+ heap.Init(h)
+ return h
+}
+
+func (h ttlKeyHeap) Len() int {
+ return len(h.array)
+}
+
+func (h ttlKeyHeap) Less(i, j int) bool {
+ return h.array[i].ExpireTime.Before(h.array[j].ExpireTime)
+}
+
+func (h ttlKeyHeap) Swap(i, j int) {
+ // swap node
+ h.array[i], h.array[j] = h.array[j], h.array[i]
+
+ // update map
+ h.keyMap[h.array[i]] = i
+ h.keyMap[h.array[j]] = j
+}
+
+func (h *ttlKeyHeap) Push(x interface{}) {
+ n, _ := x.(*node)
+ h.keyMap[n] = len(h.array)
+ h.array = append(h.array, n)
+}
+
+func (h *ttlKeyHeap) Pop() interface{} {
+ old := h.array
+ n := len(old)
+ x := old[n-1]
+ // Set the slice element to nil so the GC can recycle the node.
+ // This is needed because the Go GC does not support partial recycling:
+ // https://github.com/golang/go/issues/9618
+ old[n-1] = nil
+ h.array = old[0 : n-1]
+ delete(h.keyMap, x)
+ return x
+}
+
+func (h *ttlKeyHeap) top() *node {
+ if h.Len() != 0 {
+ return h.array[0]
+ }
+ return nil
+}
+
+func (h *ttlKeyHeap) pop() *node {
+ x := heap.Pop(h)
+ n, _ := x.(*node)
+ return n
+}
+
+func (h *ttlKeyHeap) push(x interface{}) {
+ heap.Push(h, x)
+}
+
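+// update re-establishes the heap ordering after n's expiration time has been changed.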
+func (h *ttlKeyHeap) update(n *node) {
+ index, ok := h.keyMap[n]
+ if ok {
+ heap.Remove(h, index)
+ heap.Push(h, n)
+ }
+}
+
+func (h *ttlKeyHeap) remove(n *node) {
+ index, ok := h.keyMap[n]
+ if ok {
+ heap.Remove(h, index)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2store/watcher.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/watcher.go
new file mode 100644
index 000000000000..4b1e846a2f96
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/watcher.go
@@ -0,0 +1,95 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+type Watcher interface {
+ EventChan() chan *Event
+ StartIndex() uint64 // The EtcdIndex at which the Watcher was created
+ Remove()
+}
+
+type watcher struct {
+ eventChan chan *Event
+ stream bool
+ recursive bool
+ sinceIndex uint64
+ startIndex uint64
+ hub *watcherHub
+ removed bool
+ remove func()
+}
+
+func (w *watcher) EventChan() chan *Event {
+ return w.eventChan
+}
+
+func (w *watcher) StartIndex() uint64 {
+ return w.startIndex
+}
+
+// notify notifies the watcher. If the watcher is interested in the given path,
+// the function returns true.
+func (w *watcher) notify(e *Event, originalPath bool, deleted bool) bool {
+ // The watcher is interested in the path in three cases, all under one condition:
+ // the event must happen after the watcher's sinceIndex.
+
+ // 1. The path at which the event happens is the path the watcher is watching.
+ // For example, if the watcher is watching "/foo" and the event happens at "/foo",
+ // the watcher must be interested in that event.
+
+ // 2. The watcher is a recursive watcher, so it is interested in events happening
+ // below its watch path. For example, if watcher A recursively watches "/foo",
+ // it is interested in an event happening at "/foo/bar".
+
+ // 3. When we delete a directory, we need to force-notify all the watchers watching
+ // files under the directory being deleted.
+ // For example, a watcher is watching "/foo/bar" and we delete "/foo". The watcher
+ // should get notified even though "/foo" is not the path it is watching.
+ if (w.recursive || originalPath || deleted) && e.Index() >= w.sinceIndex {
+ // We cannot block here if the eventChan is at capacity, otherwise
+ // etcd will hang. The eventChan fills up when the rate of
+ // notifications is higher than our send rate.
+ // If this happens, we close the channel.
+ select {
+ case w.eventChan <- e:
+ default:
+ // We have missed a notification. Remove the watcher.
+ // Removing the watcher also closes the eventChan.
+ w.remove()
+ }
+ return true
+ }
+ return false
+}
+
+// Remove removes the watcher from watcherHub
+// The actual remove function is guaranteed to only be executed once
+func (w *watcher) Remove() {
+ w.hub.mutex.Lock()
+ defer w.hub.mutex.Unlock()
+
+ close(w.eventChan)
+ if w.remove != nil {
+ w.remove()
+ }
+}
+
+// nopWatcher is a watcher that receives nothing, always blocking.
+type nopWatcher struct{}
+
+func NewNopWatcher() Watcher { return &nopWatcher{} }
+func (w *nopWatcher) EventChan() chan *Event { return nil }
+func (w *nopWatcher) StartIndex() uint64 { return 0 }
+func (w *nopWatcher) Remove() {}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2store/watcher_hub.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/watcher_hub.go
new file mode 100644
index 000000000000..a452e7e951f6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2store/watcher_hub.go
@@ -0,0 +1,200 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "container/list"
+ "path"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "go.etcd.io/etcd/etcdserver/api/v2error"
+)
+
+// A watcherHub contains all subscribed watchers.
+// watchers is a map with the watched path as key and the watcher as value.
+// EventHistory keeps the old events for the watcherHub. It is used to help
+// a watcher get a continuous event history; otherwise a watcher might miss
+// an event that happens between the end of the first watch command and the
+// start of the second command.
+type watcherHub struct {
+ // count must be the first element to keep 64-bit alignment for atomic
+ // access
+
+ count int64 // current number of watchers.
+
+ mutex sync.Mutex
+ watchers map[string]*list.List
+ EventHistory *EventHistory
+}
+
+// newWatchHub creates a watcherHub. The capacity determines how many events we
+// keep in the eventHistory.
+// Typically, we only need to keep a small amount of history (smaller than 20K).
+// Ideally, it should be smaller than 20K/s [max throughput] * 2 * 50ms [RTT] = 2000.
+func newWatchHub(capacity int) *watcherHub {
+ return &watcherHub{
+ watchers: make(map[string]*list.List),
+ EventHistory: newEventHistory(capacity),
+ }
+}
+
+// watch returns a Watcher.
+// If recursive is true, the first change after index under key will be sent to the event channel of the watcher.
+// If recursive is false, the first change after index at key will be sent to the event channel of the watcher.
+// If index is zero, watch will start from the current index + 1.
+func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeIndex uint64) (Watcher, *v2error.Error) {
+ reportWatchRequest()
+ event, err := wh.EventHistory.scan(key, recursive, index)
+
+ if err != nil {
+ err.Index = storeIndex
+ return nil, err
+ }
+
+ w := &watcher{
+ eventChan: make(chan *Event, 100), // use a buffered channel
+ recursive: recursive,
+ stream: stream,
+ sinceIndex: index,
+ startIndex: storeIndex,
+ hub: wh,
+ }
+
+ wh.mutex.Lock()
+ defer wh.mutex.Unlock()
+ // If the event exists in the known history, set its EtcdIndex and return immediately
+ if event != nil {
+ ne := event.Clone()
+ ne.EtcdIndex = storeIndex
+ w.eventChan <- ne
+ return w, nil
+ }
+
+ l, ok := wh.watchers[key]
+
+ var elem *list.Element
+
+ if ok { // add the new watcher to the back of the list
+ elem = l.PushBack(w)
+ } else { // create a new list and add the new watcher
+ l = list.New()
+ elem = l.PushBack(w)
+ wh.watchers[key] = l
+ }
+
+ w.remove = func() {
+ if w.removed { // avoid removing it twice
+ return
+ }
+ w.removed = true
+ l.Remove(elem)
+ atomic.AddInt64(&wh.count, -1)
+ reportWatcherRemoved()
+ if l.Len() == 0 {
+ delete(wh.watchers, key)
+ }
+ }
+
+ atomic.AddInt64(&wh.count, 1)
+ reportWatcherAdded()
+
+ return w, nil
+}
+
+func (wh *watcherHub) add(e *Event) {
+ wh.EventHistory.addEvent(e)
+}
+
+// notify accepts an event and notifies the watchers.
+func (wh *watcherHub) notify(e *Event) {
+ e = wh.EventHistory.addEvent(e) // add event into the eventHistory
+
+ segments := strings.Split(e.Node.Key, "/")
+
+ currPath := "/"
+
+ // walk through all the segments of the path and notify the watchers
+ // if the path is "/foo/bar", it will notify watchers with path "/",
+ // "/foo" and "/foo/bar"
+
+ for _, segment := range segments {
+ currPath = path.Join(currPath, segment)
+ // notify the watchers interested in changes to the current path
+ wh.notifyWatchers(e, currPath, false)
+ }
+}
+
+func (wh *watcherHub) notifyWatchers(e *Event, nodePath string, deleted bool) {
+ wh.mutex.Lock()
+ defer wh.mutex.Unlock()
+
+ l, ok := wh.watchers[nodePath]
+ if ok {
+ curr := l.Front()
+
+ for curr != nil {
+ next := curr.Next() // save reference to the next one in the list
+
+ w, _ := curr.Value.(*watcher)
+
+ originalPath := e.Node.Key == nodePath
+ if (originalPath || !isHidden(nodePath, e.Node.Key)) && w.notify(e, originalPath, deleted) {
+ if !w.stream { // do not remove the stream watcher
+ // if we successfully notify a watcher
+ // we need to remove the watcher from the list
+ // and decrease the counter
+ w.removed = true
+ l.Remove(curr)
+ atomic.AddInt64(&wh.count, -1)
+ reportWatcherRemoved()
+ }
+ }
+
+ curr = next // update current to the next element in the list
+ }
+
+ if l.Len() == 0 {
+ // if we have notified all watchers in the list
+ // we can delete the list
+ delete(wh.watchers, nodePath)
+ }
+ }
+}
+
+// clone clones the watcherHub and returns the cloned one.
+// Only the static content is cloned; the current watchers are not.
+func (wh *watcherHub) clone() *watcherHub {
+ clonedHistory := wh.EventHistory.clone()
+
+ return &watcherHub{
+ EventHistory: clonedHistory,
+ }
+}
+
+// isHidden checks whether the key path is considered hidden relative to the watch
+// path, i.e. the last element is hidden or it is within a hidden directory.
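+// For example, isHidden("/foo", "/foo/_bar") is true, while isHidden("/foo", "/foo/bar") is false.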
+func isHidden(watchPath, keyPath string) bool {
+ // When deleting a directory, watchPath might be deeper than the actual keyPath
+ // For example, when deleting /foo we also need to notify watchers on /foo/bar.
+ if len(watchPath) > len(keyPath) {
+ return false
+ }
+ // if the watch path is just "/", the remaining key path will not start with "/";
+ // prepend a "/" to handle this special case
+ afterPath := path.Clean("/" + keyPath[len(watchPath):])
+ return strings.Contains(afterPath, "/_")
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/cluster.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/cluster.go
new file mode 100644
index 000000000000..a22e4afad152
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/cluster.go
@@ -0,0 +1,31 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2v3
+
+import (
+ "go.etcd.io/etcd/etcdserver/api/membership"
+ "go.etcd.io/etcd/pkg/types"
+
+ "github.com/coreos/go-semver/semver"
+)
+
+func (s *v2v3Server) ID() types.ID {
+ // TODO: use an actual member ID
+ return types.ID(0xe7cd2f00d)
+}
+func (s *v2v3Server) ClientURLs() []string { panic("STUB") }
+func (s *v2v3Server) Members() []*membership.Member { panic("STUB") }
+func (s *v2v3Server) Member(id types.ID) *membership.Member { panic("STUB") }
+func (s *v2v3Server) Version() *semver.Version { panic("STUB") }
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/doc.go
new file mode 100644
index 000000000000..2ff372f18760
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2v3 provides a ServerV2 implementation backed by clientv3.Client.
+package v2v3
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/server.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/server.go
new file mode 100644
index 000000000000..5ff9b96c0c42
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/server.go
@@ -0,0 +1,129 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2v3
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "go.etcd.io/etcd/clientv3"
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api"
+ "go.etcd.io/etcd/etcdserver/api/membership"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/pkg/types"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+)
+
+type fakeStats struct{}
+
+func (s *fakeStats) SelfStats() []byte { return nil }
+func (s *fakeStats) LeaderStats() []byte { return nil }
+func (s *fakeStats) StoreStats() []byte { return nil }
+
+type v2v3Server struct {
+ lg *zap.Logger
+ c *clientv3.Client
+ store *v2v3Store
+ fakeStats
+}
+
+func NewServer(lg *zap.Logger, c *clientv3.Client, pfx string) etcdserver.ServerPeer {
+ return &v2v3Server{lg: lg, c: c, store: newStore(c, pfx)}
+}
+
+func (s *v2v3Server) ClientCertAuthEnabled() bool { return false }
+
+func (s *v2v3Server) LeaseHandler() http.Handler { panic("STUB: lease handler") }
+func (s *v2v3Server) RaftHandler() http.Handler { panic("STUB: raft handler") }
+
+func (s *v2v3Server) Leader() types.ID {
+ ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
+ defer cancel()
+ resp, err := s.c.Status(ctx, s.c.Endpoints()[0])
+ if err != nil {
+ return 0
+ }
+ return types.ID(resp.Leader)
+}
+
+func (s *v2v3Server) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
+ // adding member as learner is not supported by V2 Server.
+ resp, err := s.c.MemberAdd(ctx, memb.PeerURLs)
+ if err != nil {
+ return nil, err
+ }
+ return v3MembersToMembership(resp.Members), nil
+}
+
+func (s *v2v3Server) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+ resp, err := s.c.MemberRemove(ctx, id)
+ if err != nil {
+ return nil, err
+ }
+ return v3MembersToMembership(resp.Members), nil
+}
+
+func (s *v2v3Server) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+ resp, err := s.c.MemberPromote(ctx, id)
+ if err != nil {
+ return nil, err
+ }
+ return v3MembersToMembership(resp.Members), nil
+}
+
+func (s *v2v3Server) UpdateMember(ctx context.Context, m membership.Member) ([]*membership.Member, error) {
+ resp, err := s.c.MemberUpdate(ctx, uint64(m.ID), m.PeerURLs)
+ if err != nil {
+ return nil, err
+ }
+ return v3MembersToMembership(resp.Members), nil
+}
+
+func v3MembersToMembership(v3membs []*pb.Member) []*membership.Member {
+ membs := make([]*membership.Member, len(v3membs))
+ for i, m := range v3membs {
+ membs[i] = &membership.Member{
+ ID: types.ID(m.ID),
+ RaftAttributes: membership.RaftAttributes{
+ PeerURLs: m.PeerURLs,
+ IsLearner: m.IsLearner,
+ },
+ Attributes: membership.Attributes{
+ Name: m.Name,
+ ClientURLs: m.ClientURLs,
+ },
+ }
+ }
+ return membs
+}
+
+func (s *v2v3Server) ClusterVersion() *semver.Version { return s.Version() }
+func (s *v2v3Server) Cluster() api.Cluster { return s }
+func (s *v2v3Server) Alarms() []*pb.AlarmMember { return nil }
+
+func (s *v2v3Server) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) {
+ applier := etcdserver.NewApplierV2(s.lg, s.store, nil)
+ reqHandler := etcdserver.NewStoreRequestV2Handler(s.store, applier)
+ req := (*etcdserver.RequestV2)(&r)
+ resp, err := req.Handle(ctx, reqHandler)
+ if resp.Err != nil {
+ return resp, resp.Err
+ }
+ return resp, err
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/store.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/store.go
new file mode 100644
index 000000000000..f1c7ab3784d9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/store.go
@@ -0,0 +1,638 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2v3
+
+import (
+ "context"
+ "fmt"
+ "path"
+ "sort"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/clientv3"
+ "go.etcd.io/etcd/clientv3/concurrency"
+ "go.etcd.io/etcd/etcdserver/api/v2error"
+ "go.etcd.io/etcd/etcdserver/api/v2store"
+ "go.etcd.io/etcd/mvcc/mvccpb"
+)
+
+// store implements the Store interface for V2 using
+// a v3 client.
+type v2v3Store struct {
+ c *clientv3.Client
+ // pfx is the v3 prefix where keys should be stored.
+ pfx string
+ ctx context.Context
+}
+
+const maxPathDepth = 63
+
+var errUnsupported = fmt.Errorf("TTLs are unsupported")
+
+func NewStore(c *clientv3.Client, pfx string) v2store.Store { return newStore(c, pfx) }
+
+func newStore(c *clientv3.Client, pfx string) *v2v3Store { return &v2v3Store{c, pfx, c.Ctx()} }
+
+func (s *v2v3Store) Index() uint64 { panic("STUB") }
+
+func (s *v2v3Store) Get(nodePath string, recursive, sorted bool) (*v2store.Event, error) {
+ key := s.mkPath(nodePath)
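+ // directories are stored with a trailing slash in the v3 mapping, so probe both the directory and file forms of the key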
+ resp, err := s.c.Txn(s.ctx).Then(
+ clientv3.OpGet(key+"/"),
+ clientv3.OpGet(key),
+ ).Commit()
+ if err != nil {
+ return nil, err
+ }
+
+ if kvs := resp.Responses[0].GetResponseRange().Kvs; len(kvs) != 0 || isRoot(nodePath) {
+ nodes, err := s.getDir(nodePath, recursive, sorted, resp.Header.Revision)
+ if err != nil {
+ return nil, err
+ }
+ cidx, midx := uint64(0), uint64(0)
+ if len(kvs) > 0 {
+ cidx, midx = mkV2Rev(kvs[0].CreateRevision), mkV2Rev(kvs[0].ModRevision)
+ }
+ return &v2store.Event{
+ Action: v2store.Get,
+ Node: &v2store.NodeExtern{
+ Key: nodePath,
+ Dir: true,
+ Nodes: nodes,
+ CreatedIndex: cidx,
+ ModifiedIndex: midx,
+ },
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+ }
+
+ kvs := resp.Responses[1].GetResponseRange().Kvs
+ if len(kvs) == 0 {
+ return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+
+ return &v2store.Event{
+ Action: v2store.Get,
+ Node: s.mkV2Node(kvs[0]),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) getDir(nodePath string, recursive, sorted bool, rev int64) ([]*v2store.NodeExtern, error) {
+ rootNodes, err := s.getDirDepth(nodePath, 1, rev)
+ if err != nil || !recursive {
+ if sorted {
+ sort.Sort(v2store.NodeExterns(rootNodes))
+ }
+ return rootNodes, err
+ }
+ nextNodes := rootNodes
+ nodes := make(map[string]*v2store.NodeExtern)
+ // Breadth-first walk of the subdirectories
+ for i := 2; len(nextNodes) > 0; i++ {
+ for _, n := range nextNodes {
+ nodes[n.Key] = n
+ if parent := nodes[path.Dir(n.Key)]; parent != nil {
+ parent.Nodes = append(parent.Nodes, n)
+ }
+ }
+ if nextNodes, err = s.getDirDepth(nodePath, i, rev); err != nil {
+ return nil, err
+ }
+ }
+
+ if sorted {
+ sort.Sort(v2store.NodeExterns(rootNodes))
+ }
+ return rootNodes, nil
+}
+
+func (s *v2v3Store) getDirDepth(nodePath string, depth int, rev int64) ([]*v2store.NodeExtern, error) {
+ pd := s.mkPathDepth(nodePath, depth)
+ resp, err := s.c.Get(s.ctx, pd, clientv3.WithPrefix(), clientv3.WithRev(rev))
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := make([]*v2store.NodeExtern, len(resp.Kvs))
+ for i, kv := range resp.Kvs {
+ nodes[i] = s.mkV2Node(kv)
+ }
+ return nodes, nil
+}
+
+func (s *v2v3Store) Set(
+ nodePath string,
+ dir bool,
+ value string,
+ expireOpts v2store.TTLOptionSet,
+) (*v2store.Event, error) {
+ if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
+ return nil, errUnsupported
+ }
+
+ if isRoot(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
+ }
+
+ ecode := 0
+ applyf := func(stm concurrency.STM) error {
+ // build path if any directories in path do not exist
+ dirs := []string{}
+ for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) {
+ pp := s.mkPath(p)
+ if stm.Rev(pp) > 0 {
+ ecode = v2error.EcodeNotDir
+ return nil
+ }
+ if stm.Rev(pp+"/") == 0 {
+ dirs = append(dirs, pp+"/")
+ }
+ }
+ for _, d := range dirs {
+ stm.Put(d, "")
+ }
+
+ key := s.mkPath(nodePath)
+ if dir {
+ if stm.Rev(key) != 0 {
+ // exists as non-dir
+ ecode = v2error.EcodeNotDir
+ return nil
+ }
+ key = key + "/"
+ } else if stm.Rev(key+"/") != 0 {
+ ecode = v2error.EcodeNotFile
+ return nil
+ }
+ stm.Put(key, value, clientv3.WithPrevKV())
+ stm.Put(s.mkActionKey(), v2store.Set)
+ return nil
+ }
+
+ resp, err := s.newSTM(applyf)
+ if err != nil {
+ return nil, err
+ }
+ if ecode != 0 {
+ return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+
+ createRev := resp.Header.Revision
+ var pn *v2store.NodeExtern
+ if pkv := prevKeyFromPuts(resp); pkv != nil {
+ pn = s.mkV2Node(pkv)
+ createRev = pkv.CreateRevision
+ }
+
+ vp := &value
+ if dir {
+ vp = nil
+ }
+ return &v2store.Event{
+ Action: v2store.Set,
+ Node: &v2store.NodeExtern{
+ Key: nodePath,
+ Value: vp,
+ Dir: dir,
+ ModifiedIndex: mkV2Rev(resp.Header.Revision),
+ CreatedIndex: mkV2Rev(createRev),
+ },
+ PrevNode: pn,
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) Update(nodePath, newValue string, expireOpts v2store.TTLOptionSet) (*v2store.Event, error) {
+ if isRoot(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
+ }
+
+ if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
+ return nil, errUnsupported
+ }
+
+ key := s.mkPath(nodePath)
+ ecode := 0
+ applyf := func(stm concurrency.STM) error {
+ if rev := stm.Rev(key + "/"); rev != 0 {
+ ecode = v2error.EcodeNotFile
+ return nil
+ }
+ if rev := stm.Rev(key); rev == 0 {
+ ecode = v2error.EcodeKeyNotFound
+ return nil
+ }
+ stm.Put(key, newValue, clientv3.WithPrevKV())
+ stm.Put(s.mkActionKey(), v2store.Update)
+ return nil
+ }
+
+ resp, err := s.newSTM(applyf)
+ if err != nil {
+ return nil, err
+ }
+ if ecode != 0 {
+ return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+
+ pkv := prevKeyFromPuts(resp)
+ return &v2store.Event{
+ Action: v2store.Update,
+ Node: &v2store.NodeExtern{
+ Key: nodePath,
+ Value: &newValue,
+ ModifiedIndex: mkV2Rev(resp.Header.Revision),
+ CreatedIndex: mkV2Rev(pkv.CreateRevision),
+ },
+ PrevNode: s.mkV2Node(pkv),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) Create(
+ nodePath string,
+ dir bool,
+ value string,
+ unique bool,
+ expireOpts v2store.TTLOptionSet,
+) (*v2store.Event, error) {
+ if isRoot(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
+ }
+ if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
+ return nil, errUnsupported
+ }
+ ecode := 0
+ applyf := func(stm concurrency.STM) error {
+ ecode = 0
+ key := s.mkPath(nodePath)
+ if unique {
+ // append unique item under the node path
+ for {
+ key = nodePath + "/" + fmt.Sprintf("%020s", time.Now())
+ key = path.Clean(path.Join("/", key))
+ key = s.mkPath(key)
+ if stm.Rev(key) == 0 {
+ break
+ }
+ }
+ }
+ if stm.Rev(key) > 0 || stm.Rev(key+"/") > 0 {
+ ecode = v2error.EcodeNodeExist
+ return nil
+ }
+ // build path if any directories in path do not exist
+ dirs := []string{}
+ for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) {
+ pp := s.mkPath(p)
+ if stm.Rev(pp) > 0 {
+ ecode = v2error.EcodeNotDir
+ return nil
+ }
+ if stm.Rev(pp+"/") == 0 {
+ dirs = append(dirs, pp+"/")
+ }
+ }
+ for _, d := range dirs {
+ stm.Put(d, "")
+ }
+
+ if dir {
+ // directories are marked with an extra slash in the key name
+ key += "/"
+ }
+ stm.Put(key, value)
+ stm.Put(s.mkActionKey(), v2store.Create)
+ return nil
+ }
+
+ resp, err := s.newSTM(applyf)
+ if err != nil {
+ return nil, err
+ }
+ if ecode != 0 {
+ return nil, v2error.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+
+ var v *string
+ if !dir {
+ v = &value
+ }
+
+ return &v2store.Event{
+ Action: v2store.Create,
+ Node: &v2store.NodeExtern{
+ Key: nodePath,
+ Value: v,
+ Dir: dir,
+ ModifiedIndex: mkV2Rev(resp.Header.Revision),
+ CreatedIndex: mkV2Rev(resp.Header.Revision),
+ },
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) CompareAndSwap(
+ nodePath string,
+ prevValue string,
+ prevIndex uint64,
+ value string,
+ expireOpts v2store.TTLOptionSet,
+) (*v2store.Event, error) {
+ if isRoot(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
+ }
+ if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
+ return nil, errUnsupported
+ }
+
+ key := s.mkPath(nodePath)
+ resp, err := s.c.Txn(s.ctx).If(
+ s.mkCompare(nodePath, prevValue, prevIndex)...,
+ ).Then(
+ clientv3.OpPut(key, value, clientv3.WithPrevKV()),
+ clientv3.OpPut(s.mkActionKey(), v2store.CompareAndSwap),
+ ).Else(
+ clientv3.OpGet(key),
+ clientv3.OpGet(key+"/"),
+ ).Commit()
+
+ if err != nil {
+ return nil, err
+ }
+ if !resp.Succeeded {
+ return nil, compareFail(nodePath, prevValue, prevIndex, resp)
+ }
+
+ pkv := resp.Responses[0].GetResponsePut().PrevKv
+ return &v2store.Event{
+ Action: v2store.CompareAndSwap,
+ Node: &v2store.NodeExtern{
+ Key: nodePath,
+ Value: &value,
+ CreatedIndex: mkV2Rev(pkv.CreateRevision),
+ ModifiedIndex: mkV2Rev(resp.Header.Revision),
+ },
+ PrevNode: s.mkV2Node(pkv),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) Delete(nodePath string, dir, recursive bool) (*v2store.Event, error) {
+ if isRoot(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
+ }
+ if !dir && !recursive {
+ return s.deleteNode(nodePath)
+ }
+ if !recursive {
+ return s.deleteEmptyDir(nodePath)
+ }
+
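+ // delete the directory marker key plus every depth-encoded child prefix beneath it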
+ dels := make([]clientv3.Op, maxPathDepth+1)
+ dels[0] = clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV())
+ for i := 1; i < maxPathDepth; i++ {
+ dels[i] = clientv3.OpDelete(s.mkPathDepth(nodePath, i), clientv3.WithPrefix())
+ }
+ dels[maxPathDepth] = clientv3.OpPut(s.mkActionKey(), v2store.Delete)
+
+ resp, err := s.c.Txn(s.ctx).If(
+ clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), ">", 0),
+ clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, maxPathDepth)+"/"), "=", 0),
+ ).Then(
+ dels...,
+ ).Commit()
+ if err != nil {
+ return nil, err
+ }
+ if !resp.Succeeded {
+ return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ dresp := resp.Responses[0].GetResponseDeleteRange()
+ return &v2store.Event{
+ Action: v2store.Delete,
+ PrevNode: s.mkV2Node(dresp.PrevKvs[0]),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) deleteEmptyDir(nodePath string) (*v2store.Event, error) {
+ resp, err := s.c.Txn(s.ctx).If(
+ clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, 1)), "=", 0).WithPrefix(),
+ ).Then(
+ clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV()),
+ clientv3.OpPut(s.mkActionKey(), v2store.Delete),
+ ).Commit()
+ if err != nil {
+ return nil, err
+ }
+ if !resp.Succeeded {
+ return nil, v2error.NewError(v2error.EcodeDirNotEmpty, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ dresp := resp.Responses[0].GetResponseDeleteRange()
+ if len(dresp.PrevKvs) == 0 {
+ return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ return &v2store.Event{
+ Action: v2store.Delete,
+ PrevNode: s.mkV2Node(dresp.PrevKvs[0]),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) deleteNode(nodePath string) (*v2store.Event, error) {
+ resp, err := s.c.Txn(s.ctx).If(
+ clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), "=", 0),
+ ).Then(
+ clientv3.OpDelete(s.mkPath(nodePath), clientv3.WithPrevKV()),
+ clientv3.OpPut(s.mkActionKey(), v2store.Delete),
+ ).Commit()
+ if err != nil {
+ return nil, err
+ }
+ if !resp.Succeeded {
+ return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ pkvs := resp.Responses[0].GetResponseDeleteRange().PrevKvs
+ if len(pkvs) == 0 {
+ return nil, v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ pkv := pkvs[0]
+ return &v2store.Event{
+ Action: v2store.Delete,
+ Node: &v2store.NodeExtern{
+ Key: nodePath,
+ CreatedIndex: mkV2Rev(pkv.CreateRevision),
+ ModifiedIndex: mkV2Rev(resp.Header.Revision),
+ },
+ PrevNode: s.mkV2Node(pkv),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint64) (*v2store.Event, error) {
+ if isRoot(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, nodePath, 0)
+ }
+
+ key := s.mkPath(nodePath)
+ resp, err := s.c.Txn(s.ctx).If(
+ s.mkCompare(nodePath, prevValue, prevIndex)...,
+ ).Then(
+ clientv3.OpDelete(key, clientv3.WithPrevKV()),
+ clientv3.OpPut(s.mkActionKey(), v2store.CompareAndDelete),
+ ).Else(
+ clientv3.OpGet(key),
+ clientv3.OpGet(key+"/"),
+ ).Commit()
+
+ if err != nil {
+ return nil, err
+ }
+ if !resp.Succeeded {
+ return nil, compareFail(nodePath, prevValue, prevIndex, resp)
+ }
+
+ // len(PrevKvs) > 0 since the txn only succeeds when the key exists
+ pkv := resp.Responses[0].GetResponseDeleteRange().PrevKvs[0]
+ return &v2store.Event{
+ Action: v2store.CompareAndDelete,
+ Node: &v2store.NodeExtern{
+ Key: nodePath,
+ CreatedIndex: mkV2Rev(pkv.CreateRevision),
+ ModifiedIndex: mkV2Rev(resp.Header.Revision),
+ },
+ PrevNode: s.mkV2Node(pkv),
+ EtcdIndex: mkV2Rev(resp.Header.Revision),
+ }, nil
+}
+
+func compareFail(nodePath, prevValue string, prevIndex uint64, resp *clientv3.TxnResponse) error {
+ if dkvs := resp.Responses[1].GetResponseRange().Kvs; len(dkvs) > 0 {
+ return v2error.NewError(v2error.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ kvs := resp.Responses[0].GetResponseRange().Kvs
+ if len(kvs) == 0 {
+ return v2error.NewError(v2error.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
+ }
+ kv := kvs[0]
+ indexMatch := prevIndex == 0 || kv.ModRevision == int64(prevIndex)
+ valueMatch := prevValue == "" || string(kv.Value) == prevValue
+ var cause string
+ switch {
+ case indexMatch && !valueMatch:
+ cause = fmt.Sprintf("[%v != %v]", prevValue, string(kv.Value))
+ case valueMatch && !indexMatch:
+ cause = fmt.Sprintf("[%v != %v]", prevIndex, kv.ModRevision)
+ default:
+ cause = fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, string(kv.Value), prevIndex, kv.ModRevision)
+ }
+ return v2error.NewError(v2error.EcodeTestFailed, cause, mkV2Rev(resp.Header.Revision))
+}
+
+func (s *v2v3Store) mkCompare(nodePath, prevValue string, prevIndex uint64) []clientv3.Cmp {
+ key := s.mkPath(nodePath)
+ cmps := []clientv3.Cmp{clientv3.Compare(clientv3.Version(key), ">", 0)}
+ if prevIndex != 0 {
+ cmps = append(cmps, clientv3.Compare(clientv3.ModRevision(key), "=", mkV3Rev(prevIndex)))
+ }
+ if prevValue != "" {
+ cmps = append(cmps, clientv3.Compare(clientv3.Value(key), "=", prevValue))
+ }
+ return cmps
+}
+
+func (s *v2v3Store) JsonStats() []byte { panic("STUB") }
+func (s *v2v3Store) DeleteExpiredKeys(cutoff time.Time) { panic("STUB") }
+
+func (s *v2v3Store) Version() int { return 2 }
+
+// TODO: move this out of the Store interface?
+
+func (s *v2v3Store) Save() ([]byte, error) { panic("STUB") }
+func (s *v2v3Store) Recovery(state []byte) error { panic("STUB") }
+func (s *v2v3Store) Clone() v2store.Store { panic("STUB") }
+func (s *v2v3Store) SaveNoCopy() ([]byte, error) { panic("STUB") }
+func (s *v2v3Store) HasTTLKeys() bool { panic("STUB") }
+
+func (s *v2v3Store) mkPath(nodePath string) string { return s.mkPathDepth(nodePath, 0) }
+
+func (s *v2v3Store) mkNodePath(p string) string {
+ return path.Clean(p[len(s.pfx)+len("/k/000/"):])
+}
+
+// mkPathDepth makes a path to a key that encodes its directory depth
+// for fast directory listing. If a depth is provided, it is added
+// to the computed depth.
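+// For example, with pfx "p", mkPathDepth("/foo/bar", 0) yields "p/002/k//foo/bar".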
+func (s *v2v3Store) mkPathDepth(nodePath string, depth int) string {
+ normalForm := path.Clean(path.Join("/", nodePath))
+ n := strings.Count(normalForm, "/") + depth
+ return fmt.Sprintf("%s/%03d/k/%s", s.pfx, n, normalForm)
+}
+
+func (s *v2v3Store) mkActionKey() string { return s.pfx + "/act" }
+
+func isRoot(s string) bool { return len(s) == 0 || s == "/" || s == "/0" || s == "/1" }
+
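+// mkV2Rev converts a v3 store revision into a v2 index; v2 indexes are offset by one from v3 revisions.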
+func mkV2Rev(v3Rev int64) uint64 {
+ if v3Rev == 0 {
+ return 0
+ }
+ return uint64(v3Rev - 1)
+}
+
+func mkV3Rev(v2Rev uint64) int64 {
+ if v2Rev == 0 {
+ return 0
+ }
+ return int64(v2Rev + 1)
+}
+
+// mkV2Node creates a V2 NodeExtern from a V3 KeyValue
+func (s *v2v3Store) mkV2Node(kv *mvccpb.KeyValue) *v2store.NodeExtern {
+ if kv == nil {
+ return nil
+ }
+ n := &v2store.NodeExtern{
+ Key: s.mkNodePath(string(kv.Key)),
+ Dir: kv.Key[len(kv.Key)-1] == '/',
+ CreatedIndex: mkV2Rev(kv.CreateRevision),
+ ModifiedIndex: mkV2Rev(kv.ModRevision),
+ }
+ if !n.Dir {
+ v := string(kv.Value)
+ n.Value = &v
+ }
+ return n
+}
+
+// prevKeyFromPuts gets the prev key that is being put; ignores
+// the put action response.
+func prevKeyFromPuts(resp *clientv3.TxnResponse) *mvccpb.KeyValue {
+ for _, r := range resp.Responses {
+ pkv := r.GetResponsePut().PrevKv
+ if pkv != nil && pkv.CreateRevision > 0 {
+ return pkv
+ }
+ }
+ return nil
+}
+
+func (s *v2v3Store) newSTM(applyf func(concurrency.STM) error) (*clientv3.TxnResponse, error) {
+ return concurrency.NewSTM(s.c, applyf, concurrency.WithIsolation(concurrency.Serializable))
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/watcher.go b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/watcher.go
new file mode 100644
index 000000000000..e8a3557c1e91
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v2v3/watcher.go
@@ -0,0 +1,140 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2v3
+
+import (
+ "context"
+ "strings"
+
+ "go.etcd.io/etcd/clientv3"
+ "go.etcd.io/etcd/etcdserver/api/v2error"
+ "go.etcd.io/etcd/etcdserver/api/v2store"
+)
+
+func (s *v2v3Store) Watch(prefix string, recursive, stream bool, sinceIndex uint64) (v2store.Watcher, error) {
+ ctx, cancel := context.WithCancel(s.ctx)
+ wch := s.c.Watch(
+ ctx,
+ // TODO: very pricey; use a single store-wide watch in future
+ s.pfx,
+ clientv3.WithPrefix(),
+ clientv3.WithRev(int64(sinceIndex)),
+ clientv3.WithCreatedNotify(),
+ clientv3.WithPrevKV())
+ resp, ok := <-wch
+ if err := resp.Err(); err != nil || !ok {
+ cancel()
+ return nil, v2error.NewError(v2error.EcodeRaftInternal, prefix, 0)
+ }
+
+ evc, donec := make(chan *v2store.Event), make(chan struct{})
+ go func() {
+ defer func() {
+ close(evc)
+ close(donec)
+ }()
+ for resp := range wch {
+ for _, ev := range s.mkV2Events(resp) {
+ k := ev.Node.Key
+ if recursive {
+ if !strings.HasPrefix(k, prefix) {
+ continue
+ }
+ // accept events on hidden keys given in prefix
+ k = strings.Replace(k, prefix, "/", 1)
+ // ignore hidden keys deeper than prefix
+ if strings.Contains(k, "/_") {
+ continue
+ }
+ }
+ if !recursive && k != prefix {
+ continue
+ }
+ select {
+ case evc <- ev:
+ case <-ctx.Done():
+ return
+ }
+ if !stream {
+ return
+ }
+ }
+ }
+ }()
+
+ return &v2v3Watcher{
+ startRev: resp.Header.Revision,
+ evc: evc,
+ donec: donec,
+ cancel: cancel,
+ }, nil
+}
+
+func (s *v2v3Store) mkV2Events(wr clientv3.WatchResponse) (evs []*v2store.Event) {
+ ak := s.mkActionKey()
+ for _, rev := range mkRevs(wr) {
+ var act, key *clientv3.Event
+ for _, ev := range rev {
+ if string(ev.Kv.Key) == ak {
+ act = ev
+ } else if key != nil && len(key.Kv.Key) < len(ev.Kv.Key) {
+ // use the longest key to ignore intermediate new
+ // directories created by Create.
+ key = ev
+ } else if key == nil {
+ key = ev
+ }
+ }
+ v2ev := &v2store.Event{
+ Action: string(act.Kv.Value),
+ Node: s.mkV2Node(key.Kv),
+ PrevNode: s.mkV2Node(key.PrevKv),
+ EtcdIndex: mkV2Rev(wr.Header.Revision),
+ }
+ evs = append(evs, v2ev)
+ }
+ return evs
+}
+
+func mkRevs(wr clientv3.WatchResponse) (revs [][]*clientv3.Event) {
+ var curRev []*clientv3.Event
+ for _, ev := range wr.Events {
+ if curRev != nil && ev.Kv.ModRevision != curRev[0].Kv.ModRevision {
+ revs = append(revs, curRev)
+ curRev = nil
+ }
+ curRev = append(curRev, ev)
+ }
+ if curRev != nil {
+ revs = append(revs, curRev)
+ }
+ return revs
+}
+
+type v2v3Watcher struct {
+ startRev int64
+ evc chan *v2store.Event
+ donec chan struct{}
+ cancel context.CancelFunc
+}
+
+func (w *v2v3Watcher) StartIndex() uint64 { return mkV2Rev(w.startRev) }
+
+func (w *v2v3Watcher) Remove() {
+ w.cancel()
+ <-w.donec
+}
+
+func (w *v2v3Watcher) EventChan() chan *v2store.Event { return w.evc }
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3alarm/alarms.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3alarm/alarms.go
new file mode 100644
index 000000000000..2b085a8e7188
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3alarm/alarms.go
@@ -0,0 +1,153 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3alarm manages health status alarms in etcd.
+package v3alarm
+
+import (
+ "sync"
+
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.etcd.io/etcd/pkg/types"
+
+ "github.com/coreos/pkg/capnslog"
+)
+
+var (
+ alarmBucketName = []byte("alarm")
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "alarm")
+)
+
+type BackendGetter interface {
+ Backend() backend.Backend
+}
+
+type alarmSet map[types.ID]*pb.AlarmMember
+
+// AlarmStore persists alarms to the backend.
+type AlarmStore struct {
+ mu sync.Mutex
+ types map[pb.AlarmType]alarmSet
+
+ bg BackendGetter
+}
+
+func NewAlarmStore(bg BackendGetter) (*AlarmStore, error) {
+ ret := &AlarmStore{types: make(map[pb.AlarmType]alarmSet), bg: bg}
+ err := ret.restore()
+ return ret, err
+}
+
+func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ newAlarm := &pb.AlarmMember{MemberID: uint64(id), Alarm: at}
+ if m := a.addToMap(newAlarm); m != newAlarm {
+ return m
+ }
+
+ v, err := newAlarm.Marshal()
+ if err != nil {
+ plog.Panicf("failed to marshal alarm member")
+ }
+
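+ // the marshaled alarm member serves as the bucket key; the stored value is unused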
+ b := a.bg.Backend()
+ b.BatchTx().Lock()
+ b.BatchTx().UnsafePut(alarmBucketName, v, nil)
+ b.BatchTx().Unlock()
+
+ return newAlarm
+}
+
+func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ t := a.types[at]
+ if t == nil {
+ t = make(alarmSet)
+ a.types[at] = t
+ }
+ m := t[id]
+ if m == nil {
+ return nil
+ }
+
+ delete(t, id)
+
+ v, err := m.Marshal()
+ if err != nil {
+ plog.Panicf("failed to marshal alarm member")
+ }
+
+ b := a.bg.Backend()
+ b.BatchTx().Lock()
+ b.BatchTx().UnsafeDelete(alarmBucketName, v)
+ b.BatchTx().Unlock()
+
+ return m
+}
+
+func (a *AlarmStore) Get(at pb.AlarmType) (ret []*pb.AlarmMember) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ if at == pb.AlarmType_NONE {
+ for _, t := range a.types {
+ for _, m := range t {
+ ret = append(ret, m)
+ }
+ }
+ return ret
+ }
+ for _, m := range a.types[at] {
+ ret = append(ret, m)
+ }
+ return ret
+}
+
+func (a *AlarmStore) restore() error {
+ b := a.bg.Backend()
+ tx := b.BatchTx()
+
+ tx.Lock()
+ tx.UnsafeCreateBucket(alarmBucketName)
+ err := tx.UnsafeForEach(alarmBucketName, func(k, v []byte) error {
+ var m pb.AlarmMember
+ if err := m.Unmarshal(k); err != nil {
+ return err
+ }
+ a.addToMap(&m)
+ return nil
+ })
+ tx.Unlock()
+
+ b.ForceCommit()
+ return err
+}
+
+func (a *AlarmStore) addToMap(newAlarm *pb.AlarmMember) *pb.AlarmMember {
+ t := a.types[newAlarm.Alarm]
+ if t == nil {
+ t = make(alarmSet)
+ a.types[newAlarm.Alarm] = t
+ }
+ m := t[types.ID(newAlarm.MemberID)]
+ if m != nil {
+ return m
+ }
+ t[types.ID(newAlarm.MemberID)] = newAlarm
+ return newAlarm
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3client/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3client/doc.go
new file mode 100644
index 000000000000..47922c43316f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3client/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3client provides clientv3 interfaces from an etcdserver.
+//
+// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New:
+//
+// import (
+// "context"
+//
+// "go.etcd.io/etcd/embed"
+// "go.etcd.io/etcd/etcdserver/api/v3client"
+// )
+//
+// ...
+//
+// // create an embedded EtcdServer from the default configuration
+// cfg := embed.NewConfig()
+// cfg.Dir = "default.etcd"
+// e, err := embed.StartEtcd(cfg)
+// if err != nil {
+// // handle error!
+// }
+//
+// // wrap the EtcdServer with v3client
+// cli := v3client.New(e.Server)
+//
+// // use like an ordinary clientv3
+// resp, err := cli.Put(context.TODO(), "some-key", "it works!")
+// if err != nil {
+// // handle error!
+// }
+//
+package v3client
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3client/v3client.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3client/v3client.go
new file mode 100644
index 000000000000..d2031213cfb3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3client/v3client.go
@@ -0,0 +1,66 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3client
+
+import (
+ "context"
+ "time"
+
+ "go.etcd.io/etcd/clientv3"
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc"
+ "go.etcd.io/etcd/proxy/grpcproxy/adapter"
+)
+
+// New creates a clientv3 client that wraps an in-process EtcdServer. Instead
+// of making gRPC calls through sockets, the client makes direct function calls
+// to the etcd server through its api/v3rpc function interfaces.
+func New(s *etcdserver.EtcdServer) *clientv3.Client {
+ c := clientv3.NewCtxClient(context.Background())
+
+ kvc := adapter.KvServerToKvClient(v3rpc.NewQuotaKVServer(s))
+ c.KV = clientv3.NewKVFromKVClient(kvc, c)
+
+ lc := adapter.LeaseServerToLeaseClient(v3rpc.NewQuotaLeaseServer(s))
+ c.Lease = clientv3.NewLeaseFromLeaseClient(lc, c, time.Second)
+
+ wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s))
+ c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc, c)}
+
+ mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s))
+ c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc, c)
+
+ clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s))
+ c.Cluster = clientv3.NewClusterFromClusterClient(clc, c)
+
+ // TODO: implement clientv3.Auth interface?
+
+ return c
+}
+
+// blankContext implements Stringer on a context so the ctx string doesn't
+// depend on the context's WithValue data, which tends to be unsynchronized
+// (e.g., x/net/trace), causing ctx.String() to throw data races.
+type blankContext struct{ context.Context }
+
+func (*blankContext) String() string { return "(blankCtx)" }
+
+// watchWrapper wraps clientv3 watch calls to blank out the context
+// to avoid races on trace data.
+type watchWrapper struct{ clientv3.Watcher }
+
+func (ww *watchWrapper) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
+ return ww.Watcher.Watch(&blankContext{ctx}, key, opts...)
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/compactor.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/compactor.go
new file mode 100644
index 000000000000..73a96842d1c2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/compactor.go
@@ -0,0 +1,75 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3compactor
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+ "github.com/coreos/pkg/capnslog"
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap"
+)
+
+var (
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "compactor")
+)
+
+const (
+ ModePeriodic = "periodic"
+ ModeRevision = "revision"
+)
+
+// Compactor periodically purges old log entries from storage.
+type Compactor interface {
+	// Run starts the main loop of the compactor in the background.
+	// Use Stop() to halt the loop and release its resources.
+ Run()
+ // Stop halts the main loop of the compactor.
+ Stop()
+	// Pause temporarily suspends the compactor so that it runs no compactions. Call Resume() to unpause.
+ Pause()
+ // Resume restarts the compactor suspended by Pause().
+ Resume()
+}
+
+type Compactable interface {
+ Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
+}
+
+type RevGetter interface {
+ Rev() int64
+}
+
+// New returns a new Compactor based on the given "mode".
+func New(
+ lg *zap.Logger,
+ mode string,
+ retention time.Duration,
+ rg RevGetter,
+ c Compactable,
+) (Compactor, error) {
+ switch mode {
+ case ModePeriodic:
+ return newPeriodic(lg, clockwork.NewRealClock(), retention, rg, c), nil
+ case ModeRevision:
+ return newRevision(lg, clockwork.NewRealClock(), int64(retention), rg, c), nil
+ default:
+ return nil, fmt.Errorf("unsupported compaction mode %s", mode)
+ }
+}
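
As a rough sketch of how this factory is wired (an assumption-laden example, not part of this diff: the fake RevGetter and Compactable below stand in for the EtcdServer, which satisfies both interfaces in real deployments):

```go
package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/etcdserver/api/v3compactor"
	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
	"go.uber.org/zap"
)

// fakeRev reports a fixed current revision (illustrative stand-in).
type fakeRev struct{}

func (fakeRev) Rev() int64 { return 100 }

// fakeCompactable records compaction requests instead of compacting.
type fakeCompactable struct{}

func (fakeCompactable) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
	log.Printf("would compact to revision %d", r.Revision)
	return &pb.CompactionResponse{}, nil
}

func main() {
	c, err := v3compactor.New(zap.NewExample(), v3compactor.ModePeriodic, time.Hour, fakeRev{}, fakeCompactable{})
	if err != nil {
		log.Fatal(err) // unsupported compaction mode
	}
	c.Run()        // starts the loop in the background
	defer c.Stop() // Pause()/Resume() can bracket maintenance windows
}
```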
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/doc.go
new file mode 100644
index 000000000000..bb28046ce4b1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3compactor implements automated policies for compacting etcd's mvcc storage.
+package v3compactor
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/periodic.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/periodic.go
new file mode 100644
index 000000000000..ab64cb70619a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/periodic.go
@@ -0,0 +1,217 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3compactor
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/mvcc"
+
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap"
+)
+
+// Periodic compacts the log by purging revisions older than
+// the configured retention time.
+type Periodic struct {
+ lg *zap.Logger
+ clock clockwork.Clock
+ period time.Duration
+
+ rg RevGetter
+ c Compactable
+
+ revs []int64
+ ctx context.Context
+ cancel context.CancelFunc
+
+ // mu protects paused
+ mu sync.RWMutex
+ paused bool
+}
+
+// newPeriodic creates a new Periodic compactor that purges
+// revisions older than the duration h.
+func newPeriodic(lg *zap.Logger, clock clockwork.Clock, h time.Duration, rg RevGetter, c Compactable) *Periodic {
+ pc := &Periodic{
+ lg: lg,
+ clock: clock,
+ period: h,
+ rg: rg,
+ c: c,
+ revs: make([]int64, 0),
+ }
+ pc.ctx, pc.cancel = context.WithCancel(context.Background())
+ return pc
+}
+
+/*
+Compaction period 1-hour:
+ 1. compute compaction period, which is 1-hour
+ 2. record revisions for every 1/10 of 1-hour (6-minute)
+ 3. keep recording revisions with no compaction for first 1-hour
+ 4. do compact with revs[0]
+	- success? continue the for-loop and move the sliding window; revs = revs[1:]
+ - failure? update revs, and retry after 1/10 of 1-hour (6-minute)
+
+Compaction period 24-hour:
+ 1. compute compaction period, which is 1-hour
+ 2. record revisions for every 1/10 of 1-hour (6-minute)
+ 3. keep recording revisions with no compaction for first 24-hour
+ 4. do compact with revs[0]
+	- success? continue the for-loop and move the sliding window; revs = revs[1:]
+ - failure? update revs, and retry after 1/10 of 1-hour (6-minute)
+
+Compaction period 59-min:
+ 1. compute compaction period, which is 59-min
+ 2. record revisions for every 1/10 of 59-min (5.9-min)
+ 3. keep recording revisions with no compaction for first 59-min
+ 4. do compact with revs[0]
+	- success? continue the for-loop and move the sliding window; revs = revs[1:]
+ - failure? update revs, and retry after 1/10 of 59-min (5.9-min)
+
+Compaction period 5-sec:
+ 1. compute compaction period, which is 5-sec
+ 2. record revisions for every 1/10 of 5-sec (0.5-sec)
+ 3. keep recording revisions with no compaction for first 5-sec
+ 4. do compact with revs[0]
+	- success? continue the for-loop and move the sliding window; revs = revs[1:]
+ - failure? update revs, and retry after 1/10 of 5-sec (0.5-sec)
+*/
+
+// Run runs periodic compactor.
+func (pc *Periodic) Run() {
+ compactInterval := pc.getCompactInterval()
+ retryInterval := pc.getRetryInterval()
+ retentions := pc.getRetentions()
+
+ go func() {
+ lastSuccess := pc.clock.Now()
+ baseInterval := pc.period
+ for {
+ pc.revs = append(pc.revs, pc.rg.Rev())
+ if len(pc.revs) > retentions {
+ pc.revs = pc.revs[1:] // pc.revs[0] is always the rev at pc.period ago
+ }
+
+ select {
+ case <-pc.ctx.Done():
+ return
+ case <-pc.clock.After(retryInterval):
+ pc.mu.Lock()
+ p := pc.paused
+ pc.mu.Unlock()
+ if p {
+ continue
+ }
+ }
+
+ if pc.clock.Now().Sub(lastSuccess) < baseInterval {
+ continue
+ }
+
+			// wait up to the initially given period
+ if baseInterval == pc.period {
+ baseInterval = compactInterval
+ }
+ rev := pc.revs[0]
+
+ if pc.lg != nil {
+ pc.lg.Info(
+ "starting auto periodic compaction",
+ zap.Int64("revision", rev),
+ zap.Duration("compact-period", pc.period),
+ )
+ } else {
+ plog.Noticef("Starting auto-compaction at revision %d (retention: %v)", rev, pc.period)
+ }
+ _, err := pc.c.Compact(pc.ctx, &pb.CompactionRequest{Revision: rev})
+ if err == nil || err == mvcc.ErrCompacted {
+ if pc.lg != nil {
+ pc.lg.Info(
+ "completed auto periodic compaction",
+ zap.Int64("revision", rev),
+ zap.Duration("compact-period", pc.period),
+ zap.Duration("took", time.Since(lastSuccess)),
+ )
+ } else {
+ plog.Noticef("Finished auto-compaction at revision %d", rev)
+ }
+ lastSuccess = pc.clock.Now()
+ } else {
+ if pc.lg != nil {
+ pc.lg.Warn(
+ "failed auto periodic compaction",
+ zap.Int64("revision", rev),
+ zap.Duration("compact-period", pc.period),
+ zap.Duration("retry-interval", retryInterval),
+ zap.Error(err),
+ )
+ } else {
+ plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err)
+ plog.Noticef("Retry after %v", retryInterval)
+ }
+ }
+ }
+ }()
+}
+
+// If the given compaction period x is <1-hour, compact every x.
+// (e.g. --auto-compaction-mode 'periodic' --auto-compaction-retention='10m', then compact every 10 minutes)
+// If the given compaction period x is >1-hour, compact every hour.
+// (e.g. --auto-compaction-mode 'periodic' --auto-compaction-retention='2h', then compact every 1 hour)
+func (pc *Periodic) getCompactInterval() time.Duration {
+ itv := pc.period
+ if itv > time.Hour {
+ itv = time.Hour
+ }
+ return itv
+}
+
+func (pc *Periodic) getRetentions() int {
+ return int(pc.period/pc.getRetryInterval()) + 1
+}
+
+const retryDivisor = 10
+
+func (pc *Periodic) getRetryInterval() time.Duration {
+ itv := pc.period
+ if itv > time.Hour {
+ itv = time.Hour
+ }
+ return itv / retryDivisor
+}
+
+// Stop stops periodic compactor.
+func (pc *Periodic) Stop() {
+ pc.cancel()
+}
+
+// Pause pauses periodic compactor.
+func (pc *Periodic) Pause() {
+ pc.mu.Lock()
+ pc.paused = true
+ pc.mu.Unlock()
+}
+
+// Resume resumes periodic compactor.
+func (pc *Periodic) Resume() {
+ pc.mu.Lock()
+ pc.paused = false
+ pc.mu.Unlock()
+}
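
A worked example of the interval arithmetic in getCompactInterval, getRetryInterval, and getRetentions (a standalone stdlib-only sketch): with --auto-compaction-retention=2h the compactor wakes every 6 minutes, keeps 21 revision samples, and compacts at most hourly.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	period := 2 * time.Hour // --auto-compaction-retention=2h

	// getCompactInterval: cap the compaction interval at one hour.
	compactInterval := period
	if compactInterval > time.Hour {
		compactInterval = time.Hour
	}
	// getRetryInterval: 1/10 of the capped interval (retryDivisor == 10).
	retryInterval := compactInterval / 10
	// getRetentions: number of revision samples covering the full period.
	retentions := int(period/retryInterval) + 1

	fmt.Println(compactInterval, retryInterval, retentions) // 1h0m0s 6m0s 21
}
```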
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/revision.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/revision.go
new file mode 100644
index 000000000000..cf8ac430105c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3compactor/revision.go
@@ -0,0 +1,143 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3compactor
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/mvcc"
+
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap"
+)
+
+// Revision compacts the log by purging revisions that lag the current
+// revision by more than the configured retention count. Compaction happens every 5 minutes.
+type Revision struct {
+ lg *zap.Logger
+
+ clock clockwork.Clock
+ retention int64
+
+ rg RevGetter
+ c Compactable
+
+ ctx context.Context
+ cancel context.CancelFunc
+
+ mu sync.Mutex
+ paused bool
+}
+
+// newRevision creates a new Revision compactor that purges
+// revisions more than retention revisions behind the current revision.
+func newRevision(lg *zap.Logger, clock clockwork.Clock, retention int64, rg RevGetter, c Compactable) *Revision {
+ rc := &Revision{
+ lg: lg,
+ clock: clock,
+ retention: retention,
+ rg: rg,
+ c: c,
+ }
+ rc.ctx, rc.cancel = context.WithCancel(context.Background())
+ return rc
+}
+
+const revInterval = 5 * time.Minute
+
+// Run runs revision-based compactor.
+func (rc *Revision) Run() {
+ prev := int64(0)
+ go func() {
+ for {
+ select {
+ case <-rc.ctx.Done():
+ return
+ case <-rc.clock.After(revInterval):
+ rc.mu.Lock()
+ p := rc.paused
+ rc.mu.Unlock()
+ if p {
+ continue
+ }
+ }
+
+ rev := rc.rg.Rev() - rc.retention
+ if rev <= 0 || rev == prev {
+ continue
+ }
+
+ now := time.Now()
+ if rc.lg != nil {
+ rc.lg.Info(
+ "starting auto revision compaction",
+ zap.Int64("revision", rev),
+ zap.Int64("revision-compaction-retention", rc.retention),
+ )
+ } else {
+ plog.Noticef("Starting auto-compaction at revision %d (retention: %d revisions)", rev, rc.retention)
+ }
+ _, err := rc.c.Compact(rc.ctx, &pb.CompactionRequest{Revision: rev})
+ if err == nil || err == mvcc.ErrCompacted {
+ prev = rev
+ if rc.lg != nil {
+ rc.lg.Info(
+ "completed auto revision compaction",
+ zap.Int64("revision", rev),
+ zap.Int64("revision-compaction-retention", rc.retention),
+ zap.Duration("took", time.Since(now)),
+ )
+ } else {
+ plog.Noticef("Finished auto-compaction at revision %d", rev)
+ }
+ } else {
+ if rc.lg != nil {
+ rc.lg.Warn(
+ "failed auto revision compaction",
+ zap.Int64("revision", rev),
+ zap.Int64("revision-compaction-retention", rc.retention),
+ zap.Duration("retry-interval", revInterval),
+ zap.Error(err),
+ )
+ } else {
+ plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err)
+ plog.Noticef("Retry after %v", revInterval)
+ }
+ }
+ }
+ }()
+}
+
+// Stop stops revision-based compactor.
+func (rc *Revision) Stop() {
+ rc.cancel()
+}
+
+// Pause pauses revision-based compactor.
+func (rc *Revision) Pause() {
+ rc.mu.Lock()
+ rc.paused = true
+ rc.mu.Unlock()
+}
+
+// Resume resumes revision-based compactor.
+func (rc *Revision) Resume() {
+ rc.mu.Lock()
+ rc.paused = false
+ rc.mu.Unlock()
+}
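
The retention arithmetic in Run reduces to a simple subtraction; a minimal sketch with illustrative numbers (not part of the vendored file):

```go
package main

import "fmt"

func main() {
	const (
		current   = int64(10000) // rc.rg.Rev(): illustrative current revision
		retention = int64(1000)  // --auto-compaction-retention=1000
	)
	prev := int64(0) // last revision successfully compacted

	rev := current - retention
	if rev > 0 && rev != prev { // skip when there is nothing new to compact
		fmt.Println("compacting everything below revision", rev) // 9000
	}
}
```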
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3election/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/doc.go
new file mode 100644
index 000000000000..d6fefd741500
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3election provides a v3 election service from an etcdserver.
+package v3election
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3election/election.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/election.go
new file mode 100644
index 000000000000..f5a3be3b2398
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/election.go
@@ -0,0 +1,134 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3election
+
+import (
+ "context"
+ "errors"
+
+ "go.etcd.io/etcd/clientv3"
+ "go.etcd.io/etcd/clientv3/concurrency"
+ epb "go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb"
+)
+
+// ErrMissingLeaderKey is returned when election API request
+// is missing the "leader" field.
+var ErrMissingLeaderKey = errors.New(`"leader" field must be provided`)
+
+type electionServer struct {
+ c *clientv3.Client
+}
+
+func NewElectionServer(c *clientv3.Client) epb.ElectionServer {
+ return &electionServer{c}
+}
+
+func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) {
+ s, err := es.session(ctx, req.Lease)
+ if err != nil {
+ return nil, err
+ }
+ e := concurrency.NewElection(s, string(req.Name))
+ if err = e.Campaign(ctx, string(req.Value)); err != nil {
+ return nil, err
+ }
+ return &epb.CampaignResponse{
+ Header: e.Header(),
+ Leader: &epb.LeaderKey{
+ Name: req.Name,
+ Key: []byte(e.Key()),
+ Rev: e.Rev(),
+ Lease: int64(s.Lease()),
+ },
+ }, nil
+}
+
+func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) {
+ if req.Leader == nil {
+ return nil, ErrMissingLeaderKey
+ }
+ s, err := es.session(ctx, req.Leader.Lease)
+ if err != nil {
+ return nil, err
+ }
+ e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
+ if err := e.Proclaim(ctx, string(req.Value)); err != nil {
+ return nil, err
+ }
+ return &epb.ProclaimResponse{Header: e.Header()}, nil
+}
+
+func (es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error {
+ s, err := es.session(stream.Context(), -1)
+ if err != nil {
+ return err
+ }
+ e := concurrency.NewElection(s, string(req.Name))
+ ch := e.Observe(stream.Context())
+ for stream.Context().Err() == nil {
+ select {
+ case <-stream.Context().Done():
+ case resp, ok := <-ch:
+ if !ok {
+ return nil
+ }
+ lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]}
+ if err := stream.Send(lresp); err != nil {
+ return err
+ }
+ }
+ }
+ return stream.Context().Err()
+}
+
+func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) (*epb.LeaderResponse, error) {
+ s, err := es.session(ctx, -1)
+ if err != nil {
+ return nil, err
+ }
+ l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx)
+ if lerr != nil {
+ return nil, lerr
+ }
+ return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil
+}
+
+func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) {
+ if req.Leader == nil {
+ return nil, ErrMissingLeaderKey
+ }
+ s, err := es.session(ctx, req.Leader.Lease)
+ if err != nil {
+ return nil, err
+ }
+ e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
+ if err := e.Resign(ctx); err != nil {
+ return nil, err
+ }
+ return &epb.ResignResponse{Header: e.Header()}, nil
+}
+
+func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) {
+ s, err := concurrency.NewSession(
+ es.c,
+ concurrency.WithLease(clientv3.LeaseID(lease)),
+ concurrency.WithContext(ctx),
+ )
+ if err != nil {
+ return nil, err
+ }
+ s.Orphan()
+ return s, nil
+}
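
The handlers above are thin adapters over clientv3/concurrency. For reference, a minimal client-side sketch of the same Campaign/Resign flow (the endpoint and names are illustrative assumptions):

```go
package main

import (
	"context"
	"log"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// A session holds the lease that backs the leadership key.
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	e := concurrency.NewElection(s, "my-election")
	ctx := context.Background()
	if err := e.Campaign(ctx, "candidate-1"); err != nil { // blocks until elected
		log.Fatal(err)
	}
	log.Println("holding leadership under key", e.Key())
	if err := e.Resign(ctx); err != nil {
		log.Fatal(err)
	}
}
```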
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
new file mode 100644
index 000000000000..23551b54b60a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
@@ -0,0 +1,313 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: etcdserver/api/v3election/v3electionpb/v3election.proto
+
+/*
+Package gw is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package gw
+
+import (
+ "go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+
+func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq v3electionpb.CampaignRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq v3electionpb.ProclaimRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq v3electionpb.LeaderRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) {
+ var protoReq v3electionpb.LeaderRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ stream, err := client.Observe(ctx, &protoReq)
+ if err != nil {
+ return nil, metadata, err
+ }
+ header, err := stream.Header()
+ if err != nil {
+ return nil, metadata, err
+ }
+ metadata.HeaderMD = header
+ return stream, metadata, nil
+
+}
+
+func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq v3electionpb.ResignRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+// RegisterElectionHandlerFromEndpoint is the same as RegisterElectionHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterElectionHandler(ctx, mux, conn)
+}
+
+// RegisterElectionHandler registers the http handlers for service Election to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterElectionHandlerClient(ctx, mux, v3electionpb.NewElectionClient(conn))
+}
+
+// RegisterElectionHandlerClient registers the http handlers for service Election to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "ElectionClient" to call the correct interceptors.
+func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error {
+
+ mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "campaign"}, ""))
+
+ pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "proclaim"}, ""))
+
+ pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "leader"}, ""))
+
+ pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "observe"}, ""))
+
+ pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "resign"}, ""))
+)
+
+var (
+ forward_Election_Campaign_0 = runtime.ForwardResponseMessage
+
+ forward_Election_Proclaim_0 = runtime.ForwardResponseMessage
+
+ forward_Election_Leader_0 = runtime.ForwardResponseMessage
+
+ forward_Election_Observe_0 = runtime.ForwardResponseStream
+
+ forward_Election_Resign_0 = runtime.ForwardResponseMessage
+)
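
Putting the generated gateway to use looks roughly like the sketch below (not part of this diff; the addresses are illustrative, and grpc.WithInsecure is used only to keep the example short):

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	gw "go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/gw"
	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := gw.RegisterElectionHandlerFromEndpoint(ctx, mux, "127.0.0.1:2379", opts); err != nil {
		log.Fatal(err)
	}
	// POST /v3/election/campaign, /v3/election/leader, etc. now proxy
	// to the gRPC Election service at the endpoint above.
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```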
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go
new file mode 100644
index 000000000000..1fc1bce442f9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go
@@ -0,0 +1,2079 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: v3election.proto
+
+/*
+ Package v3electionpb is a generated protocol buffer package.
+
+ It is generated from these files:
+ v3election.proto
+
+ It has these top-level messages:
+ CampaignRequest
+ CampaignResponse
+ LeaderKey
+ LeaderRequest
+ LeaderResponse
+ ResignRequest
+ ResignResponse
+ ProclaimRequest
+ ProclaimResponse
+*/
+package v3electionpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ etcdserverpb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+ mvccpb "go.etcd.io/etcd/mvcc/mvccpb"
+
+ context "golang.org/x/net/context"
+
+ grpc "google.golang.org/grpc"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type CampaignRequest struct {
+ // name is the election's identifier for the campaign.
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // lease is the ID of the lease attached to leadership of the election. If the
+ // lease expires or is revoked before resigning leadership, then the
+ // leadership is transferred to the next campaigner, if any.
+ Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"`
+ // value is the initial proclaimed value set when the campaigner wins the
+ // election.
+ Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *CampaignRequest) Reset() { *m = CampaignRequest{} }
+func (m *CampaignRequest) String() string { return proto.CompactTextString(m) }
+func (*CampaignRequest) ProtoMessage() {}
+func (*CampaignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{0} }
+
+func (m *CampaignRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *CampaignRequest) GetLease() int64 {
+ if m != nil {
+ return m.Lease
+ }
+ return 0
+}
+
+func (m *CampaignRequest) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type CampaignResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+	// leader describes the resources used for holding leadership of the election.
+ Leader *LeaderKey `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"`
+}
+
+func (m *CampaignResponse) Reset() { *m = CampaignResponse{} }
+func (m *CampaignResponse) String() string { return proto.CompactTextString(m) }
+func (*CampaignResponse) ProtoMessage() {}
+func (*CampaignResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{1} }
+
+func (m *CampaignResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *CampaignResponse) GetLeader() *LeaderKey {
+ if m != nil {
+ return m.Leader
+ }
+ return nil
+}
+
+type LeaderKey struct {
+	// name is the election identifier that corresponds to the leadership key.
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // key is an opaque key representing the ownership of the election. If the key
+ // is deleted, then leadership is lost.
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	// rev is the creation revision of the key. It can be used to test for ownership
+	// of an election during transactions by testing that the key's creation revision
+	// matches rev.
+ Rev int64 `protobuf:"varint,3,opt,name=rev,proto3" json:"rev,omitempty"`
+ // lease is the lease ID of the election leader.
+ Lease int64 `protobuf:"varint,4,opt,name=lease,proto3" json:"lease,omitempty"`
+}
+
+func (m *LeaderKey) Reset() { *m = LeaderKey{} }
+func (m *LeaderKey) String() string { return proto.CompactTextString(m) }
+func (*LeaderKey) ProtoMessage() {}
+func (*LeaderKey) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{2} }
+
+func (m *LeaderKey) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *LeaderKey) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *LeaderKey) GetRev() int64 {
+ if m != nil {
+ return m.Rev
+ }
+ return 0
+}
+
+func (m *LeaderKey) GetLease() int64 {
+ if m != nil {
+ return m.Lease
+ }
+ return 0
+}
+
+type LeaderRequest struct {
+ // name is the election identifier for the leadership information.
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *LeaderRequest) Reset() { *m = LeaderRequest{} }
+func (m *LeaderRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaderRequest) ProtoMessage() {}
+func (*LeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{3} }
+
+func (m *LeaderRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+type LeaderResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // kv is the key-value pair representing the latest leader update.
+ Kv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"`
+}
+
+func (m *LeaderResponse) Reset() { *m = LeaderResponse{} }
+func (m *LeaderResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaderResponse) ProtoMessage() {}
+func (*LeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{4} }
+
+func (m *LeaderResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaderResponse) GetKv() *mvccpb.KeyValue {
+ if m != nil {
+ return m.Kv
+ }
+ return nil
+}
+
+type ResignRequest struct {
+ // leader is the leadership to relinquish by resignation.
+ Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader" json:"leader,omitempty"`
+}
+
+func (m *ResignRequest) Reset() { *m = ResignRequest{} }
+func (m *ResignRequest) String() string { return proto.CompactTextString(m) }
+func (*ResignRequest) ProtoMessage() {}
+func (*ResignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{5} }
+
+func (m *ResignRequest) GetLeader() *LeaderKey {
+ if m != nil {
+ return m.Leader
+ }
+ return nil
+}
+
+type ResignResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *ResignResponse) Reset() { *m = ResignResponse{} }
+func (m *ResignResponse) String() string { return proto.CompactTextString(m) }
+func (*ResignResponse) ProtoMessage() {}
+func (*ResignResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{6} }
+
+func (m *ResignResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type ProclaimRequest struct {
+ // leader is the leadership hold on the election.
+ Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader" json:"leader,omitempty"`
+ // value is an update meant to overwrite the leader's current value.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *ProclaimRequest) Reset() { *m = ProclaimRequest{} }
+func (m *ProclaimRequest) String() string { return proto.CompactTextString(m) }
+func (*ProclaimRequest) ProtoMessage() {}
+func (*ProclaimRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{7} }
+
+func (m *ProclaimRequest) GetLeader() *LeaderKey {
+ if m != nil {
+ return m.Leader
+ }
+ return nil
+}
+
+func (m *ProclaimRequest) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type ProclaimResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *ProclaimResponse) Reset() { *m = ProclaimResponse{} }
+func (m *ProclaimResponse) String() string { return proto.CompactTextString(m) }
+func (*ProclaimResponse) ProtoMessage() {}
+func (*ProclaimResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{8} }
+
+func (m *ProclaimResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*CampaignRequest)(nil), "v3electionpb.CampaignRequest")
+ proto.RegisterType((*CampaignResponse)(nil), "v3electionpb.CampaignResponse")
+ proto.RegisterType((*LeaderKey)(nil), "v3electionpb.LeaderKey")
+ proto.RegisterType((*LeaderRequest)(nil), "v3electionpb.LeaderRequest")
+ proto.RegisterType((*LeaderResponse)(nil), "v3electionpb.LeaderResponse")
+ proto.RegisterType((*ResignRequest)(nil), "v3electionpb.ResignRequest")
+ proto.RegisterType((*ResignResponse)(nil), "v3electionpb.ResignResponse")
+ proto.RegisterType((*ProclaimRequest)(nil), "v3electionpb.ProclaimRequest")
+ proto.RegisterType((*ProclaimResponse)(nil), "v3electionpb.ProclaimResponse")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Election service
+
+type ElectionClient interface {
+ // Campaign waits to acquire leadership in an election, returning a LeaderKey
+ // representing the leadership if successful. The LeaderKey can then be used
+ // to issue new values on the election, transactionally guard API requests on
+ // leadership still being held, and resign from the election.
+ Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error)
+ // Proclaim updates the leader's posted value with a new value.
+ Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error)
+ // Leader returns the current election proclamation, if any.
+ Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error)
+ // Observe streams election proclamations in-order as made by the election's
+ // elected leaders.
+ Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error)
+ // Resign releases election leadership so other campaigners may acquire
+ // leadership on the election.
+ Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error)
+}
+
+type electionClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewElectionClient(cc *grpc.ClientConn) ElectionClient {
+ return &electionClient{cc}
+}
+
+func (c *electionClient) Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) {
+ out := new(CampaignResponse)
+ err := grpc.Invoke(ctx, "/v3electionpb.Election/Campaign", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *electionClient) Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) {
+ out := new(ProclaimResponse)
+ err := grpc.Invoke(ctx, "/v3electionpb.Election/Proclaim", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *electionClient) Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) {
+ out := new(LeaderResponse)
+ err := grpc.Invoke(ctx, "/v3electionpb.Election/Leader", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *electionClient) Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) {
+ stream, err := grpc.NewClientStream(ctx, &_Election_serviceDesc.Streams[0], c.cc, "/v3electionpb.Election/Observe", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &electionObserveClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Election_ObserveClient interface {
+ Recv() (*LeaderResponse, error)
+ grpc.ClientStream
+}
+
+type electionObserveClient struct {
+ grpc.ClientStream
+}
+
+func (x *electionObserveClient) Recv() (*LeaderResponse, error) {
+ m := new(LeaderResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *electionClient) Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) {
+ out := new(ResignResponse)
+ err := grpc.Invoke(ctx, "/v3electionpb.Election/Resign", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for Election service
+
+type ElectionServer interface {
+ // Campaign waits to acquire leadership in an election, returning a LeaderKey
+ // representing the leadership if successful. The LeaderKey can then be used
+ // to issue new values on the election, transactionally guard API requests on
+ // leadership still being held, and resign from the election.
+ Campaign(context.Context, *CampaignRequest) (*CampaignResponse, error)
+ // Proclaim updates the leader's posted value with a new value.
+ Proclaim(context.Context, *ProclaimRequest) (*ProclaimResponse, error)
+ // Leader returns the current election proclamation, if any.
+ Leader(context.Context, *LeaderRequest) (*LeaderResponse, error)
+ // Observe streams election proclamations in-order as made by the election's
+ // elected leaders.
+ Observe(*LeaderRequest, Election_ObserveServer) error
+ // Resign releases election leadership so other campaigners may acquire
+ // leadership on the election.
+ Resign(context.Context, *ResignRequest) (*ResignResponse, error)
+}
+
+func RegisterElectionServer(s *grpc.Server, srv ElectionServer) {
+ s.RegisterService(&_Election_serviceDesc, srv)
+}
+
+func _Election_Campaign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CampaignRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ElectionServer).Campaign(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3electionpb.Election/Campaign",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ElectionServer).Campaign(ctx, req.(*CampaignRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Election_Proclaim_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ProclaimRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ElectionServer).Proclaim(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3electionpb.Election/Proclaim",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ElectionServer).Proclaim(ctx, req.(*ProclaimRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Election_Leader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ElectionServer).Leader(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3electionpb.Election/Leader",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ElectionServer).Leader(ctx, req.(*LeaderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Election_Observe_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(LeaderRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(ElectionServer).Observe(m, &electionObserveServer{stream})
+}
+
+type Election_ObserveServer interface {
+ Send(*LeaderResponse) error
+ grpc.ServerStream
+}
+
+type electionObserveServer struct {
+ grpc.ServerStream
+}
+
+func (x *electionObserveServer) Send(m *LeaderResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Election_Resign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ResignRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ElectionServer).Resign(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3electionpb.Election/Resign",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ElectionServer).Resign(ctx, req.(*ResignRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Election_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "v3electionpb.Election",
+ HandlerType: (*ElectionServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Campaign",
+ Handler: _Election_Campaign_Handler,
+ },
+ {
+ MethodName: "Proclaim",
+ Handler: _Election_Proclaim_Handler,
+ },
+ {
+ MethodName: "Leader",
+ Handler: _Election_Leader_Handler,
+ },
+ {
+ MethodName: "Resign",
+ Handler: _Election_Resign_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Observe",
+ Handler: _Election_Observe_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "v3election.proto",
+}
+
+func (m *CampaignRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CampaignRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if m.Lease != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Lease))
+ }
+ if len(m.Value) > 0 {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value)))
+ i += copy(dAtA[i:], m.Value)
+ }
+ return i, nil
+}
+
+func (m *CampaignResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CampaignResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size()))
+ n1, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ if m.Leader != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size()))
+ n2, err := m.Leader.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ return i, nil
+}
+
+func (m *LeaderKey) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaderKey) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if len(m.Key) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ if m.Rev != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Rev))
+ }
+ if m.Lease != 0 {
+ dAtA[i] = 0x20
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Lease))
+ }
+ return i, nil
+}
+
+func (m *LeaderRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaderRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ return i, nil
+}
+
+func (m *LeaderResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaderResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size()))
+ n3, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n3
+ }
+ if m.Kv != nil {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Kv.Size()))
+ n4, err := m.Kv.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n4
+ }
+ return i, nil
+}
+
+func (m *ResignRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResignRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Leader != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size()))
+ n5, err := m.Leader.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n5
+ }
+ return i, nil
+}
+
+func (m *ResignResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResignResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size()))
+ n6, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n6
+ }
+ return i, nil
+}
+
+func (m *ProclaimRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProclaimRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Leader != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size()))
+ n7, err := m.Leader.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n7
+ }
+ if len(m.Value) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value)))
+ i += copy(dAtA[i:], m.Value)
+ }
+ return i, nil
+}
+
+func (m *ProclaimResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProclaimResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size()))
+ n8, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n8
+ }
+ return i, nil
+}
+
+func encodeVarintV3Election(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *CampaignRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.Lease != 0 {
+ n += 1 + sovV3Election(uint64(m.Lease))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ return n
+}
+
+func (m *CampaignResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.Leader != nil {
+ l = m.Leader.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ return n
+}
+
+func (m *LeaderKey) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.Rev != 0 {
+ n += 1 + sovV3Election(uint64(m.Rev))
+ }
+ if m.Lease != 0 {
+ n += 1 + sovV3Election(uint64(m.Lease))
+ }
+ return n
+}
+
+func (m *LeaderRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ return n
+}
+
+func (m *LeaderResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.Kv != nil {
+ l = m.Kv.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ return n
+}
+
+func (m *ResignRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Leader != nil {
+ l = m.Leader.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ return n
+}
+
+func (m *ResignResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ return n
+}
+
+func (m *ProclaimRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.Leader != nil {
+ l = m.Leader.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ return n
+}
+
+func (m *ProclaimResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ return n
+}
+
+func sovV3Election(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozV3Election(x uint64) (n int) {
+ return sovV3Election(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *CampaignRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CampaignRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CampaignRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ m.Lease = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Lease |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CampaignResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CampaignResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CampaignResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &etcdserverpb.ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Leader == nil {
+ m.Leader = &LeaderKey{}
+ }
+ if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaderKey) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaderKey: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaderKey: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rev", wireType)
+ }
+ m.Rev = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Rev |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ m.Lease = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Lease |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaderRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaderRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaderResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaderResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &etcdserverpb.ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Kv == nil {
+ m.Kv = &mvccpb.KeyValue{}
+ }
+ if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResignRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResignRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResignRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Leader == nil {
+ m.Leader = &LeaderKey{}
+ }
+ if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResignResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResignResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResignResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &etcdserverpb.ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProclaimRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProclaimRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProclaimRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Leader == nil {
+ m.Leader = &LeaderKey{}
+ }
+ if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProclaimResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProclaimResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProclaimResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &etcdserverpb.ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipV3Election(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthV3Election
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipV3Election(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthV3Election = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowV3Election = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("v3election.proto", fileDescriptorV3Election) }
+
+var fileDescriptorV3Election = []byte{
+ // 535 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xcf, 0x6e, 0xd3, 0x40,
+ 0x10, 0xc6, 0x59, 0x27, 0x84, 0x32, 0xa4, 0xad, 0x65, 0x82, 0x48, 0x43, 0x30, 0xd1, 0x22, 0xa1,
+ 0x2a, 0x07, 0x2f, 0x6a, 0x38, 0xe5, 0x84, 0x40, 0xa0, 0x4a, 0x45, 0x02, 0x7c, 0x40, 0x70, 0xdc,
+ 0xb8, 0x23, 0x37, 0x8a, 0xe3, 0x35, 0xb6, 0x6b, 0x29, 0x57, 0x5e, 0x81, 0x03, 0x3c, 0x12, 0x47,
+ 0x24, 0x5e, 0x00, 0x05, 0x1e, 0x04, 0xed, 0xae, 0x8d, 0xff, 0x28, 0x41, 0xa8, 0xb9, 0x58, 0xe3,
+ 0x9d, 0xcf, 0xf3, 0x9b, 0x6f, 0x76, 0x12, 0x30, 0xb3, 0x09, 0x06, 0xe8, 0xa5, 0x73, 0x11, 0x3a,
+ 0x51, 0x2c, 0x52, 0x61, 0x75, 0xcb, 0x93, 0x68, 0x36, 0xe8, 0xf9, 0xc2, 0x17, 0x2a, 0xc1, 0x64,
+ 0xa4, 0x35, 0x83, 0x47, 0x98, 0x7a, 0xe7, 0x4c, 0x3e, 0x12, 0x8c, 0x33, 0x8c, 0x2b, 0x61, 0x34,
+ 0x63, 0x71, 0xe4, 0xe5, 0xba, 0x23, 0xa5, 0x5b, 0x66, 0x9e, 0xa7, 0x1e, 0xd1, 0x8c, 0x2d, 0xb2,
+ 0x3c, 0x35, 0xf4, 0x85, 0xf0, 0x03, 0x64, 0x3c, 0x9a, 0x33, 0x1e, 0x86, 0x22, 0xe5, 0x92, 0x98,
+ 0xe8, 0x2c, 0x7d, 0x0b, 0x87, 0xcf, 0xf9, 0x32, 0xe2, 0x73, 0x3f, 0x74, 0xf1, 0xe3, 0x25, 0x26,
+ 0xa9, 0x65, 0x41, 0x3b, 0xe4, 0x4b, 0xec, 0x93, 0x11, 0x39, 0xee, 0xba, 0x2a, 0xb6, 0x7a, 0x70,
+ 0x3d, 0x40, 0x9e, 0x60, 0xdf, 0x18, 0x91, 0xe3, 0x96, 0xab, 0x5f, 0xe4, 0x69, 0xc6, 0x83, 0x4b,
+ 0xec, 0xb7, 0x94, 0x54, 0xbf, 0xd0, 0x15, 0x98, 0x65, 0xc9, 0x24, 0x12, 0x61, 0x82, 0xd6, 0x13,
+ 0xe8, 0x5c, 0x20, 0x3f, 0xc7, 0x58, 0x55, 0xbd, 0x75, 0x32, 0x74, 0xaa, 0x46, 0x9c, 0x42, 0x77,
+ 0xaa, 0x34, 0x6e, 0xae, 0xb5, 0x18, 0x74, 0x02, 0xfd, 0x95, 0xa1, 0xbe, 0xba, 0xeb, 0x54, 0x47,
+ 0xe6, 0xbc, 0x52, 0xb9, 0x33, 0x5c, 0xb9, 0xb9, 0x8c, 0x7e, 0x80, 0x9b, 0x7f, 0x0f, 0x37, 0xfa,
+ 0x30, 0xa1, 0xb5, 0xc0, 0x95, 0x2a, 0xd7, 0x75, 0x65, 0x28, 0x4f, 0x62, 0xcc, 0x94, 0x83, 0x96,
+ 0x2b, 0xc3, 0xd2, 0x6b, 0xbb, 0xe2, 0x95, 0x3e, 0x84, 0x7d, 0x5d, 0xfa, 0x1f, 0x63, 0xa2, 0x17,
+ 0x70, 0x50, 0x88, 0x76, 0x32, 0x3e, 0x02, 0x63, 0x91, 0xe5, 0xa6, 0x4d, 0x47, 0xdf, 0xa8, 0x73,
+ 0x86, 0xab, 0x77, 0x72, 0xc0, 0xae, 0xb1, 0xc8, 0xe8, 0x53, 0xd8, 0x77, 0x31, 0xa9, 0xdc, 0x5a,
+ 0x39, 0x2b, 0xf2, 0x7f, 0xb3, 0x7a, 0x09, 0x07, 0x45, 0x85, 0x5d, 0x7a, 0xa5, 0xef, 0xe1, 0xf0,
+ 0x4d, 0x2c, 0xbc, 0x80, 0xcf, 0x97, 0x57, 0xed, 0xa5, 0x5c, 0x24, 0xa3, 0xba, 0x48, 0xa7, 0x60,
+ 0x96, 0x95, 0x77, 0xe9, 0xf1, 0xe4, 0x4b, 0x1b, 0xf6, 0x5e, 0xe4, 0x0d, 0x58, 0x0b, 0xd8, 0x2b,
+ 0xf6, 0xd3, 0xba, 0x5f, 0xef, 0xac, 0xf1, 0x53, 0x18, 0xd8, 0xdb, 0xd2, 0x9a, 0x42, 0x47, 0x9f,
+ 0x7e, 0xfc, 0xfe, 0x6c, 0x0c, 0xe8, 0x1d, 0x96, 0x4d, 0x58, 0x21, 0x64, 0x5e, 0x2e, 0x9b, 0x92,
+ 0xb1, 0x84, 0x15, 0x1e, 0x9a, 0xb0, 0xc6, 0xd4, 0x9a, 0xb0, 0xa6, 0xf5, 0x2d, 0xb0, 0x28, 0x97,
+ 0x49, 0x98, 0x07, 0x1d, 0x3d, 0x5b, 0xeb, 0xde, 0xa6, 0x89, 0x17, 0xa0, 0xe1, 0xe6, 0x64, 0x8e,
+ 0xb1, 0x15, 0xa6, 0x4f, 0x6f, 0xd7, 0x30, 0xfa, 0xa2, 0x24, 0xc4, 0x87, 0x1b, 0xaf, 0x67, 0x6a,
+ 0xe0, 0xbb, 0x50, 0x1e, 0x28, 0xca, 0x11, 0xed, 0xd5, 0x28, 0x42, 0x17, 0x9e, 0x92, 0xf1, 0x63,
+ 0x22, 0xdd, 0xe8, 0x05, 0x6d, 0x72, 0x6a, 0x8b, 0xdf, 0xe4, 0xd4, 0x77, 0x7a, 0x8b, 0x9b, 0x58,
+ 0x89, 0xa6, 0x64, 0xfc, 0xcc, 0xfc, 0xb6, 0xb6, 0xc9, 0xf7, 0xb5, 0x4d, 0x7e, 0xae, 0x6d, 0xf2,
+ 0xf5, 0x97, 0x7d, 0x6d, 0xd6, 0x51, 0x7f, 0x8c, 0x93, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x2f,
+ 0x1d, 0xfa, 0x11, 0xb1, 0x05, 0x00, 0x00,
+}
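
Reviewer note: every Marshal, Size, and Unmarshal method in the generated file above bottoms out in the same base-128 varint scheme that encodeVarintV3Election and sovV3Election implement (sozV3Election additionally zig-zag encodes signed values, mapping x to (x<<1)^(x>>63) so small negatives stay small on the wire). The following standalone sketch is not part of this diff; putVarint and varintSize are illustrative names showing the encoding in isolation:

// Standalone sketch of protobuf base-128 varint encoding; putVarint mirrors
// encodeVarintV3Election above and varintSize mirrors sovV3Election.
package main

import "fmt"

// putVarint appends v to buf, low 7 bits per byte, MSB set on all but the last byte.
func putVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// varintSize returns the number of bytes putVarint would emit for v.
func varintSize(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	b := putVarint(nil, 300)
	fmt.Printf("%x (size %d)\n", b, varintSize(300)) // prints: ac02 (size 2)
}
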
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto
new file mode 100644
index 000000000000..918f39fa853f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto
@@ -0,0 +1,119 @@
+syntax = "proto3";
+package v3electionpb;
+
+import "gogoproto/gogo.proto";
+import "etcd/etcdserver/etcdserverpb/rpc.proto";
+import "etcd/mvcc/mvccpb/kv.proto";
+
+// for grpc-gateway
+import "google/api/annotations.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// The election service exposes client-side election facilities as a gRPC interface.
+service Election {
+ // Campaign waits to acquire leadership in an election, returning a LeaderKey
+ // representing the leadership if successful. The LeaderKey can then be used
+ // to issue new values on the election, transactionally guard API requests on
+ // leadership still being held, and resign from the election.
+ rpc Campaign(CampaignRequest) returns (CampaignResponse) {
+ option (google.api.http) = {
+ post: "/v3/election/campaign"
+ body: "*"
+ };
+ }
+ // Proclaim updates the leader's posted value with a new value.
+ rpc Proclaim(ProclaimRequest) returns (ProclaimResponse) {
+ option (google.api.http) = {
+ post: "/v3/election/proclaim"
+ body: "*"
+ };
+ }
+ // Leader returns the current election proclamation, if any.
+ rpc Leader(LeaderRequest) returns (LeaderResponse) {
+ option (google.api.http) = {
+ post: "/v3/election/leader"
+ body: "*"
+ };
+ }
+ // Observe streams election proclamations in-order as made by the election's
+ // elected leaders.
+ rpc Observe(LeaderRequest) returns (stream LeaderResponse) {
+ option (google.api.http) = {
+ post: "/v3/election/observe"
+ body: "*"
+ };
+ }
+ // Resign releases election leadership so other campaigners may acquire
+ // leadership on the election.
+ rpc Resign(ResignRequest) returns (ResignResponse) {
+ option (google.api.http) = {
+ post: "/v3/election/resign"
+ body: "*"
+ };
+ }
+}
+
+message CampaignRequest {
+ // name is the election's identifier for the campaign.
+ bytes name = 1;
+ // lease is the ID of the lease attached to leadership of the election. If the
+ // lease expires or is revoked before resigning leadership, then the
+ // leadership is transferred to the next campaigner, if any.
+ int64 lease = 2;
+ // value is the initial proclaimed value set when the campaigner wins the
+ // election.
+ bytes value = 3;
+}
+
+message CampaignResponse {
+ etcdserverpb.ResponseHeader header = 1;
+ // leader describes the resources used for holding leadership of the election.
+ LeaderKey leader = 2;
+}
+
+message LeaderKey {
+ // name is the election identifier that corresponds to the leadership key.
+ bytes name = 1;
+ // key is an opaque key representing the ownership of the election. If the key
+ // is deleted, then leadership is lost.
+ bytes key = 2;
+ // rev is the creation revision of the key. It can be used to test for ownership
+ // of an election during transactions by testing that the key's creation revision
+ // matches rev.
+ int64 rev = 3;
+ // lease is the lease ID of the election leader.
+ int64 lease = 4;
+}
+
+message LeaderRequest {
+ // name is the election identifier for the leadership information.
+ bytes name = 1;
+}
+
+message LeaderResponse {
+ etcdserverpb.ResponseHeader header = 1;
+ // kv is the key-value pair representing the latest leader update.
+ mvccpb.KeyValue kv = 2;
+}
+
+message ResignRequest {
+ // leader is the leadership to relinquish by resignation.
+ LeaderKey leader = 1;
+}
+
+message ResignResponse {
+ etcdserverpb.ResponseHeader header = 1;
+}
+
+message ProclaimRequest {
+ // leader is the leadership hold on the election.
+ LeaderKey leader = 1;
+ // value is an update meant to overwrite the leader's current value.
+ bytes value = 2;
+}
+
+message ProclaimResponse {
+ etcdserverpb.ResponseHeader header = 1;
+}
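
Reviewer note: the service comments above spell out the RPC semantics; for a concrete picture, here is a minimal client-side sketch using the clientv3/concurrency wrapper over this Election service (the same package the lock server below builds on). The endpoint address and election name are illustrative assumptions, not part of this diff:

// Illustrative sketch: campaigning in an election via clientv3/concurrency.
package main

import (
	"context"
	"log"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// The session holds the lease that backs leadership (CampaignRequest.lease).
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	e := concurrency.NewElection(s, "/my-election") // election name maps to a key prefix
	ctx := context.Background()

	if err := e.Campaign(ctx, "v1"); err != nil { // Campaign RPC: block until leader
		log.Fatal(err)
	}
	if err := e.Proclaim(ctx, "v2"); err != nil { // Proclaim RPC: update posted value
		log.Fatal(err)
	}
	if err := e.Resign(ctx); err != nil { // Resign RPC: release leadership
		log.Fatal(err)
	}
}

Campaign blocks until leadership is acquired under the session's lease; if that lease expires before Resign is called, leadership passes to the next campaigner exactly as the CampaignRequest comment above describes.
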
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/doc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/doc.go
new file mode 100644
index 000000000000..e0a1008abc9e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3lock provides a v3 locking service from an etcdserver.
+package v3lock
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/lock.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/lock.go
new file mode 100644
index 000000000000..5a17c86fc2b7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/lock.go
@@ -0,0 +1,56 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3lock
+
+import (
+ "context"
+
+ "go.etcd.io/etcd/clientv3"
+ "go.etcd.io/etcd/clientv3/concurrency"
+ "go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb"
+)
+
+type lockServer struct {
+ c *clientv3.Client
+}
+
+func NewLockServer(c *clientv3.Client) v3lockpb.LockServer {
+ return &lockServer{c}
+}
+
+func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) {
+ s, err := concurrency.NewSession(
+ ls.c,
+ concurrency.WithLease(clientv3.LeaseID(req.Lease)),
+ concurrency.WithContext(ctx),
+ )
+ if err != nil {
+ return nil, err
+ }
+ s.Orphan()
+ m := concurrency.NewMutex(s, string(req.Name))
+ if err = m.Lock(ctx); err != nil {
+ return nil, err
+ }
+ return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil
+}
+
+func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) {
+ resp, err := ls.c.Delete(ctx, string(req.Key))
+ if err != nil {
+ return nil, err
+ }
+ return &v3lockpb.UnlockResponse{Header: resp.Header}, nil
+}
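
Reviewer note: in the server above, s.Orphan() stops the per-request session's keepalive without revoking the lease, so lock ownership is tied to the caller's lease lifetime rather than to the Lock RPC itself. A minimal gRPC client exercising this service might look as follows; the endpoint is an assumption, and a Lease of 0 lets concurrency.NewSession grant a fresh lease with the default TTL:

// Illustrative sketch: acquiring and releasing a lock over the Lock gRPC service.
package main

import (
	"context"
	"log"

	"go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	lc := v3lockpb.NewLockClient(conn)
	ctx := context.Background()

	// Lock blocks until the named lock is acquired; Key identifies ownership.
	lr, err := lc.Lock(ctx, &v3lockpb.LockRequest{Name: []byte("my-lock"), Lease: 0})
	if err != nil {
		log.Fatal(err)
	}
	// ... critical section ...
	if _, err := lc.Unlock(ctx, &v3lockpb.UnlockRequest{Key: lr.Key}); err != nil {
		log.Fatal(err)
	}
}
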
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
new file mode 100644
index 000000000000..1eeeff1853f7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
@@ -0,0 +1,167 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: etcdserver/api/v3lock/v3lockpb/v3lock.proto
+
+/*
+Package gw is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package gw
+
+import (
+ "go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+
+func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq v3lockpb.LockRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq v3lockpb.UnlockRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+// RegisterLockHandlerFromEndpoint is the same as RegisterLockHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterLockHandler(ctx, mux, conn)
+}
+
+// RegisterLockHandler registers the http handlers for service Lock to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterLockHandlerClient(ctx, mux, v3lockpb.NewLockClient(conn))
+}
+
+// RegisterLockHandlerClient registers the http handlers for service Lock to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "LockClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "LockClient" to call the correct interceptors.
+func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error {
+
+ mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3", "lock"}, ""))
+
+ pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lock", "unlock"}, ""))
+)
+
+var (
+ forward_Lock_Lock_0 = runtime.ForwardResponseMessage
+
+ forward_Lock_Unlock_0 = runtime.ForwardResponseMessage
+)
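
Reviewer note: the patterns above register POST /v3/lock/lock and POST /v3/lock/unlock, so the same Lock service is reachable over plain HTTP/JSON through this gateway. grpc-gateway encodes proto bytes fields as base64 strings in JSON, so an illustrative request (assuming the gateway listens on the usual client port) could look like:

// Illustrative sketch: calling the Lock service through the grpc-gateway REST proxy.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// "bXktbG9jaw==" is base64("my-lock"); bytes fields travel as base64 in JSON.
	body := bytes.NewBufferString(`{"name": "bXktbG9jaw=="}`)
	resp, err := http.Post("http://127.0.0.1:2379/v3/lock/lock", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(out)) // JSON LockResponse with a base64-encoded "key"
}
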
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go
new file mode 100644
index 000000000000..36ebdd90f4a8
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go
@@ -0,0 +1,959 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: v3lock.proto
+
+/*
+ Package v3lockpb is a generated protocol buffer package.
+
+ It is generated from these files:
+ v3lock.proto
+
+ It has these top-level messages:
+ LockRequest
+ LockResponse
+ UnlockRequest
+ UnlockResponse
+*/
+package v3lockpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ etcdserverpb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+ context "golang.org/x/net/context"
+
+ grpc "google.golang.org/grpc"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type LockRequest struct {
+ // name is the identifier for the distributed shared lock to be acquired.
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // lease is the ID of the lease that will be attached to ownership of the
+ // lock. If the lease expires or is revoked while it currently holds the lock,
+ // the lock is automatically released. Calls to Lock with the same lease will
+ // be treated as a single acquisition; locking twice with the same lease is a
+ // no-op.
+ Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"`
+}
+
+func (m *LockRequest) Reset() { *m = LockRequest{} }
+func (m *LockRequest) String() string { return proto.CompactTextString(m) }
+func (*LockRequest) ProtoMessage() {}
+func (*LockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{0} }
+
+func (m *LockRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *LockRequest) GetLease() int64 {
+ if m != nil {
+ return m.Lease
+ }
+ return 0
+}
+
+type LockResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+ // key is a key that will exist on etcd for the duration that the Lock caller
+ // owns the lock. Users should not modify this key or the lock may exhibit
+ // undefined behavior.
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (m *LockResponse) Reset() { *m = LockResponse{} }
+func (m *LockResponse) String() string { return proto.CompactTextString(m) }
+func (*LockResponse) ProtoMessage() {}
+func (*LockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{1} }
+
+func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LockResponse) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+type UnlockRequest struct {
+ // key is the lock ownership key granted by Lock.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (m *UnlockRequest) Reset() { *m = UnlockRequest{} }
+func (m *UnlockRequest) String() string { return proto.CompactTextString(m) }
+func (*UnlockRequest) ProtoMessage() {}
+func (*UnlockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{2} }
+
+func (m *UnlockRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+type UnlockResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+}
+
+func (m *UnlockResponse) Reset() { *m = UnlockResponse{} }
+func (m *UnlockResponse) String() string { return proto.CompactTextString(m) }
+func (*UnlockResponse) ProtoMessage() {}
+func (*UnlockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{3} }
+
+func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*LockRequest)(nil), "v3lockpb.LockRequest")
+ proto.RegisterType((*LockResponse)(nil), "v3lockpb.LockResponse")
+ proto.RegisterType((*UnlockRequest)(nil), "v3lockpb.UnlockRequest")
+ proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Lock service
+
+type LockClient interface {
+ // Lock acquires a distributed shared lock on a given named lock.
+ // On success, it will return a unique key that exists so long as the
+ // lock is held by the caller. This key can be used in conjunction with
+ // transactions to safely ensure updates to etcd only occur while holding
+ // lock ownership. The lock is held until Unlock is called on the key or the
+ // lease associated with the owner expires.
+ Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error)
+ // Unlock takes a key returned by Lock and releases the hold on lock. The
+ // next Lock caller waiting for the lock will then be woken up and given
+ // ownership of the lock.
+ Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error)
+}
+
+type lockClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewLockClient(cc *grpc.ClientConn) LockClient {
+ return &lockClient{cc}
+}
+
+func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) {
+ out := new(LockResponse)
+ err := grpc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) {
+ out := new(UnlockResponse)
+ err := grpc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for Lock service
+
+type LockServer interface {
+ // Lock acquires a distributed shared lock on a given named lock.
+ // On success, it will return a unique key that exists so long as the
+ // lock is held by the caller. This key can be used in conjunction with
+ // transactions to safely ensure updates to etcd only occur while holding
+ // lock ownership. The lock is held until Unlock is called on the key or the
+ // lease associated with the owner expires.
+ Lock(context.Context, *LockRequest) (*LockResponse, error)
+ // Unlock takes a key returned by Lock and releases the hold on lock. The
+ // next Lock caller waiting for the lock will then be woken up and given
+ // ownership of the lock.
+ Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error)
+}
+
+func RegisterLockServer(s *grpc.Server, srv LockServer) {
+ s.RegisterService(&_Lock_serviceDesc, srv)
+}
+
+func _Lock_Lock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LockRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LockServer).Lock(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3lockpb.Lock/Lock",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LockServer).Lock(ctx, req.(*LockRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Lock_Unlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UnlockRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LockServer).Unlock(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3lockpb.Lock/Unlock",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LockServer).Unlock(ctx, req.(*UnlockRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Lock_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "v3lockpb.Lock",
+ HandlerType: (*LockServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Lock",
+ Handler: _Lock_Lock_Handler,
+ },
+ {
+ MethodName: "Unlock",
+ Handler: _Lock_Unlock_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "v3lock.proto",
+}
+
+func (m *LockRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LockRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Name) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name)))
+ i += copy(dAtA[i:], m.Name)
+ }
+ if m.Lease != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease))
+ }
+ return i, nil
+}
+
+func (m *LockResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LockResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size()))
+ n1, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ if len(m.Key) > 0 {
+ dAtA[i] = 0x12
+ i++
+ i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ return i, nil
+}
+
+func (m *UnlockRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UnlockRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if len(m.Key) > 0 {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
+ i += copy(dAtA[i:], m.Key)
+ }
+ return i, nil
+}
+
+func (m *UnlockResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UnlockResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.Header != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size()))
+ n2, err := m.Header.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ return i, nil
+}
+
+func encodeVarintV3Lock(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *LockRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovV3Lock(uint64(l))
+ }
+ if m.Lease != 0 {
+ n += 1 + sovV3Lock(uint64(m.Lease))
+ }
+ return n
+}
+
+func (m *LockResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovV3Lock(uint64(l))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovV3Lock(uint64(l))
+ }
+ return n
+}
+
+func (m *UnlockRequest) Size() (n int) {
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovV3Lock(uint64(l))
+ }
+ return n
+}
+
+func (m *UnlockResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovV3Lock(uint64(l))
+ }
+ return n
+}
+
+func sovV3Lock(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozV3Lock(x uint64) (n int) {
+ return sovV3Lock(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *LockRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LockRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ m.Lease = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Lease |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Lock(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LockResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LockResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &etcdserverpb.ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Lock(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UnlockRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UnlockRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UnlockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Lock(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UnlockResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UnlockResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UnlockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &etcdserverpb.ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Lock(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipV3Lock(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthV3Lock
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipV3Lock(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("v3lock.proto", fileDescriptorV3Lock) }
+
+var fileDescriptorV3Lock = []byte{
+ // 331 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9,
+ 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44,
+ 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x2d, 0xb5, 0x24, 0x39,
+ 0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a,
+ 0x92, 0xa1, 0xea, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13,
+ 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xb2, 0x4a, 0xe6, 0x5c, 0xdc,
+ 0x3e, 0xf9, 0xc9, 0xd9, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79,
+ 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x08, 0x17, 0x6b,
+ 0x4e, 0x6a, 0x62, 0x71, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x73, 0x10, 0x84, 0xa3, 0x14, 0xc6,
+ 0xc5, 0x03, 0xd1, 0x58, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc2, 0xc5, 0x96, 0x91, 0x9a,
+ 0x98, 0x92, 0x5a, 0x04, 0xd6, 0xcb, 0x6d, 0x24, 0xa3, 0x87, 0xec, 0x1e, 0x3d, 0x98, 0x3a, 0x0f,
+ 0xb0, 0x9a, 0x20, 0xa8, 0x5a, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0xb0, 0xc9, 0x3c, 0x41,
+ 0x20, 0xa6, 0x92, 0x22, 0x17, 0x6f, 0x68, 0x5e, 0x0e, 0x92, 0x93, 0xa0, 0x4a, 0x18, 0x11, 0x4a,
+ 0xdc, 0xb8, 0xf8, 0x60, 0x4a, 0x28, 0xb1, 0xdc, 0x68, 0x03, 0x23, 0x17, 0x0b, 0xc8, 0x0f, 0x42,
+ 0xfe, 0x50, 0x5a, 0x54, 0x0f, 0x16, 0xe6, 0x7a, 0x48, 0x81, 0x22, 0x25, 0x86, 0x2e, 0x0c, 0x31,
+ 0x4d, 0x49, 0xa2, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0x42, 0x4a, 0xbc, 0xfa, 0x65, 0xc6, 0xfa, 0x20,
+ 0x05, 0x60, 0xc2, 0x8a, 0x51, 0x4b, 0x28, 0x9c, 0x8b, 0x0d, 0xe2, 0x42, 0x21, 0x71, 0x84, 0x5e,
+ 0x14, 0x6f, 0x49, 0x49, 0x60, 0x4a, 0x40, 0x8d, 0x95, 0x02, 0x1b, 0x2b, 0xa2, 0xc4, 0x0f, 0x37,
+ 0xb6, 0x34, 0x0f, 0x6a, 0xb0, 0x93, 0xc0, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e,
+ 0x78, 0x24, 0xc7, 0x38, 0xe3, 0xb1, 0x1c, 0x43, 0x12, 0x1b, 0x38, 0x1e, 0x8d, 0x01, 0x01, 0x00,
+ 0x00, 0xff, 0xff, 0x65, 0xa8, 0x61, 0xb1, 0x3d, 0x02, 0x00, 0x00,
+}
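
The Unmarshal methods above are gogoproto output; the .proto below enables marshaler_all/unmarshaler_all, so each message also gets a matching generated Marshal. A minimal round-trip sketch (not part of the diff; the import path assumes the vendored layout):

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb"
)

func main() {
	req := &v3lockpb.UnlockRequest{Key: []byte("/locks/job-1")} // sample key
	data, err := req.Marshal()                                  // generated because marshaler_all = true
	if err != nil {
		panic(err)
	}
	var out v3lockpb.UnlockRequest
	if err := out.Unmarshal(data); err != nil { // the decoder shown above
		panic(err)
	}
	fmt.Printf("key survives the round trip: %q\n", out.Key)
}
```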
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto
new file mode 100644
index 000000000000..7220c7f0a19d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto
@@ -0,0 +1,65 @@
+syntax = "proto3";
+package v3lockpb;
+
+import "gogoproto/gogo.proto";
+import "etcd/etcdserver/etcdserverpb/rpc.proto";
+
+// for grpc-gateway
+import "google/api/annotations.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// The lock service exposes client-side locking facilities as a gRPC interface.
+service Lock {
+ // Lock acquires a distributed shared lock on a given named lock.
+ // On success, it will return a unique key that exists so long as the
+ // lock is held by the caller. This key can be used in conjunction with
+ // transactions to safely ensure updates to etcd only occur while holding
+ // lock ownership. The lock is held until Unlock is called on the key or the
+  // lease associated with the owner expires.
+ rpc Lock(LockRequest) returns (LockResponse) {
+ option (google.api.http) = {
+ post: "/v3/lock/lock"
+ body: "*"
+ };
+ }
+
+  // Unlock takes a key returned by Lock and releases the hold on the lock. The
+ // next Lock caller waiting for the lock will then be woken up and given
+ // ownership of the lock.
+ rpc Unlock(UnlockRequest) returns (UnlockResponse) {
+ option (google.api.http) = {
+ post: "/v3/lock/unlock"
+ body: "*"
+ };
+ }
+}
+
+message LockRequest {
+ // name is the identifier for the distributed shared lock to be acquired.
+ bytes name = 1;
+ // lease is the ID of the lease that will be attached to ownership of the
+  // lock. If the lease expires or is revoked while it holds the lock,
+  // the lock is automatically released. Calls to Lock with the same lease will
+ // be treated as a single acquisition; locking twice with the same lease is a
+ // no-op.
+ int64 lease = 2;
+}
+
+message LockResponse {
+ etcdserverpb.ResponseHeader header = 1;
+ // key is a key that will exist on etcd for the duration that the Lock caller
+ // owns the lock. Users should not modify this key or the lock may exhibit
+ // undefined behavior.
+ bytes key = 2;
+}
+
+message UnlockRequest {
+ // key is the lock ownership key granted by Lock.
+ bytes key = 1;
+}
+
+message UnlockResponse {
+ etcdserverpb.ResponseHeader header = 1;
+}
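
In practice the Lock service is rarely called directly; the etcd client wraps it (together with leases) in the clientv3/concurrency helpers. A hedged usage sketch, with a placeholder endpoint:

```go
package main

import (
	"context"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// A session owns the lease that the lock's ownership key is bound to;
	// if the session lapses, the lock is released automatically.
	sess, err := concurrency.NewSession(cli)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	m := concurrency.NewMutex(sess, "/my-lock/")
	if err := m.Lock(context.TODO()); err != nil {
		panic(err)
	}
	defer m.Unlock(context.TODO())
	// critical section
}
```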
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/auth.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/auth.go
new file mode 100644
index 000000000000..62ce757beaa0
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/auth.go
@@ -0,0 +1,158 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+
+ "go.etcd.io/etcd/etcdserver"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+)
+
+type AuthServer struct {
+ authenticator etcdserver.Authenticator
+}
+
+func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer {
+ return &AuthServer{authenticator: s}
+}
+
+func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
+ resp, err := as.authenticator.AuthEnable(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
+ resp, err := as.authenticator.AuthDisable(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
+ resp, err := as.authenticator.Authenticate(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ resp, err := as.authenticator.RoleAdd(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ resp, err := as.authenticator.RoleDelete(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ resp, err := as.authenticator.RoleGet(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ resp, err := as.authenticator.RoleList(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ resp, err := as.authenticator.RoleRevokePermission(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ resp, err := as.authenticator.RoleGrantPermission(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ resp, err := as.authenticator.UserAdd(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ resp, err := as.authenticator.UserDelete(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ resp, err := as.authenticator.UserGet(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ resp, err := as.authenticator.UserList(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ resp, err := as.authenticator.UserGrantRole(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ resp, err := as.authenticator.UserRevokeRole(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ resp, err := as.authenticator.UserChangePassword(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/codec.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/codec.go
new file mode 100644
index 000000000000..17a2c87ae61d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/codec.go
@@ -0,0 +1,34 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import "github.com/gogo/protobuf/proto"
+
+type codec struct{}
+
+func (c *codec) Marshal(v interface{}) ([]byte, error) {
+ b, err := proto.Marshal(v.(proto.Message))
+ sentBytes.Add(float64(len(b)))
+ return b, err
+}
+
+func (c *codec) Unmarshal(data []byte, v interface{}) error {
+ receivedBytes.Add(float64(len(data)))
+ return proto.Unmarshal(data, v.(proto.Message))
+}
+
+func (c *codec) String() string {
+ return "proto"
+}
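
The codec serves double duty: it satisfies gRPC's (legacy) Codec interface and feeds the sentBytes/receivedBytes counters defined in metrics.go. A compile-time assertion sketch of the contract it must meet:

```go
package v3rpc

import "google.golang.org/grpc"

// Sketch only: codec must implement grpc.Codec (Marshal/Unmarshal/String)
// to be usable with the grpc.CustomCodec option applied in grpc.go.
var _ grpc.Codec = (*codec)(nil)
```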
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/grpc.go
new file mode 100644
index 000000000000..3332016617db
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/grpc.go
@@ -0,0 +1,77 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "crypto/tls"
+ "math"
+
+ "go.etcd.io/etcd/etcdserver"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+ grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
+ grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+ "go.etcd.io/etcd/clientv3/credentials"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/health"
+ healthpb "google.golang.org/grpc/health/grpc_health_v1"
+)
+
+const (
+ grpcOverheadBytes = 512 * 1024
+ maxStreams = math.MaxUint32
+ maxSendBytes = math.MaxInt32
+)
+
+func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server {
+ var opts []grpc.ServerOption
+ opts = append(opts, grpc.CustomCodec(&codec{}))
+ if tls != nil {
+ bundle := credentials.NewBundle(credentials.Config{TLSConfig: tls})
+ opts = append(opts, grpc.Creds(bundle.TransportCredentials()))
+ }
+ opts = append(opts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
+ newLogUnaryInterceptor(s),
+ newUnaryInterceptor(s),
+ grpc_prometheus.UnaryServerInterceptor,
+ )))
+ opts = append(opts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
+ newStreamInterceptor(s),
+ grpc_prometheus.StreamServerInterceptor,
+ )))
+ opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes)))
+ opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
+ opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
+ grpcServer := grpc.NewServer(append(opts, gopts...)...)
+
+ pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
+ pb.RegisterWatchServer(grpcServer, NewWatchServer(s))
+ pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s))
+ pb.RegisterClusterServer(grpcServer, NewClusterServer(s))
+ pb.RegisterAuthServer(grpcServer, NewAuthServer(s))
+ pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s))
+
+	// the server should register all the services manually;
+	// the empty service name reports the health status of all etcd services,
+	// see https://github.com/grpc/grpc/blob/master/doc/health-checking.md for more
+ hsrv := health.NewServer()
+ hsrv.SetServingStatus("", healthpb.HealthCheckResponse_SERVING)
+ healthpb.RegisterHealthServer(grpcServer, hsrv)
+
+ // set zero values for metrics registered for this grpc server
+ grpc_prometheus.Register(grpcServer)
+
+ return grpcServer
+}
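
Server only builds the *grpc.Server; the caller still has to bind it to a listener. A minimal sketch, where serveV3, the address, and the nil TLS config are all assumptions for illustration:

```go
package main

import (
	"net"

	"go.etcd.io/etcd/etcdserver"
	"go.etcd.io/etcd/etcdserver/api/v3rpc"
)

// serveV3 is a sketch, not part of the diff: it puts the *grpc.Server
// built by v3rpc.Server above onto a TCP listener. The address is a
// placeholder and the nil TLS config means plaintext.
func serveV3(s *etcdserver.EtcdServer) error {
	lis, err := net.Listen("tcp", "127.0.0.1:2379")
	if err != nil {
		return err
	}
	return v3rpc.Server(s, nil).Serve(lis)
}
```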
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/header.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/header.go
new file mode 100644
index 000000000000..f23b6a738563
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/header.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "go.etcd.io/etcd/etcdserver"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+)
+
+type header struct {
+ clusterID int64
+ memberID int64
+ sg etcdserver.RaftStatusGetter
+ rev func() int64
+}
+
+func newHeader(s *etcdserver.EtcdServer) header {
+ return header{
+ clusterID: int64(s.Cluster().ID()),
+ memberID: int64(s.ID()),
+ sg: s,
+ rev: func() int64 { return s.KV().Rev() },
+ }
+}
+
+// fill populates pb.ResponseHeader using etcdserver information
+func (h *header) fill(rh *pb.ResponseHeader) {
+ if rh == nil {
+ plog.Panic("unexpected nil resp.Header")
+ }
+ rh.ClusterId = uint64(h.clusterID)
+ rh.MemberId = uint64(h.memberID)
+ rh.RaftTerm = h.sg.Term()
+ if rh.Revision == 0 {
+ rh.Revision = h.rev()
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/interceptor.go
new file mode 100644
index 000000000000..0a3b48e86626
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/interceptor.go
@@ -0,0 +1,292 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/coreos/pkg/capnslog"
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/raft"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+)
+
+const (
+ maxNoLeaderCnt = 3
+)
+
+type streamsMap struct {
+ mu sync.Mutex
+ streams map[grpc.ServerStream]struct{}
+}
+
+func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
+ return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ if !api.IsCapabilityEnabled(api.V3rpcCapability) {
+ return nil, rpctypes.ErrGRPCNotCapable
+ }
+
+ if s.IsMemberExist(s.ID()) && s.IsLearner() && !isRPCSupportedForLearner(req) {
+ return nil, rpctypes.ErrGPRCNotSupportedForLearner
+ }
+
+ md, ok := metadata.FromIncomingContext(ctx)
+ if ok {
+ ver, vs := "unknown", metadataGet(md, rpctypes.MetadataClientAPIVersionKey)
+ if len(vs) > 0 {
+ ver = vs[0]
+ }
+ clientRequests.WithLabelValues("unary", ver).Inc()
+
+ if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
+ if s.Leader() == types.ID(raft.None) {
+ return nil, rpctypes.ErrGRPCNoLeader
+ }
+ }
+ }
+
+ return handler(ctx, req)
+ }
+}
+
+func newLogUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
+ return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ startTime := time.Now()
+ resp, err := handler(ctx, req)
+ lg := s.Logger()
+ if (lg != nil && lg.Core().Enabled(zap.DebugLevel)) || // using zap logger and debug level is enabled
+ (lg == nil && plog.LevelAt(capnslog.DEBUG)) { // or, using capnslog and debug level is enabled
+ defer logUnaryRequestStats(ctx, lg, info, startTime, req, resp)
+ }
+ return resp, err
+ }
+}
+
+func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, info *grpc.UnaryServerInfo, startTime time.Time, req interface{}, resp interface{}) {
+ duration := time.Since(startTime)
+ remote := "No remote client info."
+ peerInfo, ok := peer.FromContext(ctx)
+ if ok {
+ remote = peerInfo.Addr.String()
+ }
+ responseType := info.FullMethod
+ var reqCount, respCount int64
+ var reqSize, respSize int
+ var reqContent string
+ switch _resp := resp.(type) {
+ case *pb.RangeResponse:
+ _req, ok := req.(*pb.RangeRequest)
+ if ok {
+ reqCount = 0
+ reqSize = _req.Size()
+ reqContent = _req.String()
+ }
+ if _resp != nil {
+ respCount = _resp.GetCount()
+ respSize = _resp.Size()
+ }
+ case *pb.PutResponse:
+ _req, ok := req.(*pb.PutRequest)
+ if ok {
+ reqCount = 1
+ reqSize = _req.Size()
+ reqContent = pb.NewLoggablePutRequest(_req).String()
+ // redact value field from request content, see PR #9821
+ }
+ if _resp != nil {
+ respCount = 0
+ respSize = _resp.Size()
+ }
+ case *pb.DeleteRangeResponse:
+ _req, ok := req.(*pb.DeleteRangeRequest)
+ if ok {
+ reqCount = 0
+ reqSize = _req.Size()
+ reqContent = _req.String()
+ }
+ if _resp != nil {
+ respCount = _resp.GetDeleted()
+ respSize = _resp.Size()
+ }
+ case *pb.TxnResponse:
+ _req, ok := req.(*pb.TxnRequest)
+ if ok && _resp != nil {
+ if _resp.GetSucceeded() { // determine the 'actual' count and size of request based on success or failure
+ reqCount = int64(len(_req.GetSuccess()))
+ reqSize = 0
+ for _, r := range _req.GetSuccess() {
+ reqSize += r.Size()
+ }
+ } else {
+ reqCount = int64(len(_req.GetFailure()))
+ reqSize = 0
+ for _, r := range _req.GetFailure() {
+ reqSize += r.Size()
+ }
+ }
+ reqContent = pb.NewLoggableTxnRequest(_req).String()
+ // redact value field from request content, see PR #9821
+ }
+ if _resp != nil {
+ respCount = 0
+ respSize = _resp.Size()
+ }
+ default:
+ reqCount = -1
+ reqSize = -1
+ respCount = -1
+ respSize = -1
+ }
+
+ logGenericRequestStats(lg, startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent)
+}
+
+func logGenericRequestStats(lg *zap.Logger, startTime time.Time, duration time.Duration, remote string, responseType string,
+ reqCount int64, reqSize int, respCount int64, respSize int, reqContent string) {
+ if lg == nil {
+ plog.Debugf("start time = %v, "+
+ "time spent = %v, "+
+ "remote = %s, "+
+ "response type = %s, "+
+ "request count = %d, "+
+ "request size = %d, "+
+ "response count = %d, "+
+ "response size = %d, "+
+ "request content = %s",
+ startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent,
+ )
+ } else {
+ lg.Debug("request stats",
+ zap.Time("start time", startTime),
+ zap.Duration("time spent", duration),
+ zap.String("remote", remote),
+ zap.String("response type", responseType),
+ zap.Int64("request count", reqCount),
+ zap.Int("request size", reqSize),
+ zap.Int64("response count", respCount),
+ zap.Int("response size", respSize),
+ zap.String("request content", reqContent),
+ )
+ }
+}
+
+func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
+ smap := monitorLeader(s)
+
+ return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ if !api.IsCapabilityEnabled(api.V3rpcCapability) {
+ return rpctypes.ErrGRPCNotCapable
+ }
+
+ if s.IsMemberExist(s.ID()) && s.IsLearner() { // learner does not support stream RPC
+ return rpctypes.ErrGPRCNotSupportedForLearner
+ }
+
+ md, ok := metadata.FromIncomingContext(ss.Context())
+ if ok {
+ ver, vs := "unknown", metadataGet(md, rpctypes.MetadataClientAPIVersionKey)
+ if len(vs) > 0 {
+ ver = vs[0]
+ }
+ clientRequests.WithLabelValues("stream", ver).Inc()
+
+ if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
+ if s.Leader() == types.ID(raft.None) {
+ return rpctypes.ErrGRPCNoLeader
+ }
+
+ cctx, cancel := context.WithCancel(ss.Context())
+ ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss}
+
+ smap.mu.Lock()
+ smap.streams[ss] = struct{}{}
+ smap.mu.Unlock()
+
+ defer func() {
+ smap.mu.Lock()
+ delete(smap.streams, ss)
+ smap.mu.Unlock()
+ cancel()
+ }()
+ }
+ }
+
+ return handler(srv, ss)
+ }
+}
+
+type serverStreamWithCtx struct {
+ grpc.ServerStream
+ ctx context.Context
+ cancel *context.CancelFunc
+}
+
+func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }
+
+func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
+ smap := &streamsMap{
+ streams: make(map[grpc.ServerStream]struct{}),
+ }
+
+ go func() {
+ election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond
+ noLeaderCnt := 0
+
+ for {
+ select {
+ case <-s.StopNotify():
+ return
+ case <-time.After(election):
+ if s.Leader() == types.ID(raft.None) {
+ noLeaderCnt++
+ } else {
+ noLeaderCnt = 0
+ }
+
+ // We are more conservative on canceling existing streams. Reconnecting streams
+ // cost much more than just rejecting new requests. So we wait until the member
+ // cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams.
+ if noLeaderCnt >= maxNoLeaderCnt {
+ smap.mu.Lock()
+ for ss := range smap.streams {
+ if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
+ (*ssWithCtx.cancel)()
+ <-ss.Context().Done()
+ }
+ }
+ smap.streams = make(map[grpc.ServerStream]struct{})
+ smap.mu.Unlock()
+ }
+ }
+ }
+ }()
+
+ return smap
+}
+
+func metadataGet(md metadata.MD, k string) []string {
+ k = strings.ToLower(k)
+ return md[k]
+}
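
The hasleader metadata these interceptors check is what clientv3.WithRequireLeader attaches on the client side. A short sketch (requireLeaderGet is an illustrative name):

```go
package main

import (
	"context"

	"go.etcd.io/etcd/clientv3"
)

// requireLeaderGet is a sketch: WithRequireLeader attaches the
// "hasleader" metadata that newUnaryInterceptor/newStreamInterceptor
// check, so the call fails fast instead of blocking when the member
// has no leader.
func requireLeaderGet(cli *clientv3.Client, key string) (*clientv3.GetResponse, error) {
	ctx := clientv3.WithRequireLeader(context.Background())
	return cli.Get(ctx, key)
}
```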
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/key.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/key.go
new file mode 100644
index 000000000000..ff59bac346ea
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/key.go
@@ -0,0 +1,277 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3rpc implements etcd v3 RPC system based on gRPC.
+package v3rpc
+
+import (
+ "context"
+
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/pkg/adt"
+
+ "github.com/coreos/pkg/capnslog"
+)
+
+var (
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver/api/v3rpc")
+)
+
+type kvServer struct {
+ hdr header
+ kv etcdserver.RaftKV
+ // maxTxnOps is the max operations per txn.
+	// e.g. suppose maxTxnOps = 128.
+ // Txn.Success can have at most 128 operations,
+ // and Txn.Failure can have at most 128 operations.
+ maxTxnOps uint
+}
+
+func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer {
+ return &kvServer{hdr: newHeader(s), kv: s, maxTxnOps: s.Cfg.MaxTxnOps}
+}
+
+func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+ if err := checkRangeRequest(r); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.kv.Range(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+ if err := checkPutRequest(r); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.kv.Put(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ if err := checkDeleteRequest(r); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.kv.DeleteRange(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+ if err := checkTxnRequest(r, int(s.maxTxnOps)); err != nil {
+ return nil, err
+ }
+ // check for forbidden put/del overlaps after checking request to avoid quadratic blowup
+ if _, _, err := checkIntervals(r.Success); err != nil {
+ return nil, err
+ }
+ if _, _, err := checkIntervals(r.Failure); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.kv.Txn(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
+ resp, err := s.kv.Compact(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func checkRangeRequest(r *pb.RangeRequest) error {
+ if len(r.Key) == 0 {
+ return rpctypes.ErrGRPCEmptyKey
+ }
+ return nil
+}
+
+func checkPutRequest(r *pb.PutRequest) error {
+ if len(r.Key) == 0 {
+ return rpctypes.ErrGRPCEmptyKey
+ }
+ if r.IgnoreValue && len(r.Value) != 0 {
+ return rpctypes.ErrGRPCValueProvided
+ }
+ if r.IgnoreLease && r.Lease != 0 {
+ return rpctypes.ErrGRPCLeaseProvided
+ }
+ return nil
+}
+
+func checkDeleteRequest(r *pb.DeleteRangeRequest) error {
+ if len(r.Key) == 0 {
+ return rpctypes.ErrGRPCEmptyKey
+ }
+ return nil
+}
+
+func checkTxnRequest(r *pb.TxnRequest, maxTxnOps int) error {
+ opc := len(r.Compare)
+ if opc < len(r.Success) {
+ opc = len(r.Success)
+ }
+ if opc < len(r.Failure) {
+ opc = len(r.Failure)
+ }
+ if opc > maxTxnOps {
+ return rpctypes.ErrGRPCTooManyOps
+ }
+
+ for _, c := range r.Compare {
+ if len(c.Key) == 0 {
+ return rpctypes.ErrGRPCEmptyKey
+ }
+ }
+ for _, u := range r.Success {
+ if err := checkRequestOp(u, maxTxnOps-opc); err != nil {
+ return err
+ }
+ }
+ for _, u := range r.Failure {
+ if err := checkRequestOp(u, maxTxnOps-opc); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// checkIntervals tests whether puts and deletes overlap for a list of ops. If
+// there is an overlap, it returns an error. If there is no overlap, it returns
+// the put and delete sets for recursive evaluation.
+func checkIntervals(reqs []*pb.RequestOp) (map[string]struct{}, adt.IntervalTree, error) {
+ dels := adt.NewIntervalTree()
+
+	// collect deletes from this level; build the set first so puts from lower levels can be checked against it
+ for _, req := range reqs {
+ tv, ok := req.Request.(*pb.RequestOp_RequestDeleteRange)
+ if !ok {
+ continue
+ }
+ dreq := tv.RequestDeleteRange
+ if dreq == nil {
+ continue
+ }
+ var iv adt.Interval
+ if len(dreq.RangeEnd) != 0 {
+ iv = adt.NewStringAffineInterval(string(dreq.Key), string(dreq.RangeEnd))
+ } else {
+ iv = adt.NewStringAffinePoint(string(dreq.Key))
+ }
+ dels.Insert(iv, struct{}{})
+ }
+
+ // collect children puts/deletes
+ puts := make(map[string]struct{})
+ for _, req := range reqs {
+ tv, ok := req.Request.(*pb.RequestOp_RequestTxn)
+ if !ok {
+ continue
+ }
+ putsThen, delsThen, err := checkIntervals(tv.RequestTxn.Success)
+ if err != nil {
+ return nil, dels, err
+ }
+ putsElse, delsElse, err := checkIntervals(tv.RequestTxn.Failure)
+ if err != nil {
+ return nil, dels, err
+ }
+ for k := range putsThen {
+ if _, ok := puts[k]; ok {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ if dels.Intersects(adt.NewStringAffinePoint(k)) {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ puts[k] = struct{}{}
+ }
+ for k := range putsElse {
+ if _, ok := puts[k]; ok {
+				// if the key is from putsThen, overlap is OK since
+				// the then/else branches are mutually exclusive
+ if _, isSafe := putsThen[k]; !isSafe {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ }
+ if dels.Intersects(adt.NewStringAffinePoint(k)) {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ puts[k] = struct{}{}
+ }
+ dels.Union(delsThen, adt.NewStringAffineInterval("\x00", ""))
+ dels.Union(delsElse, adt.NewStringAffineInterval("\x00", ""))
+ }
+
+ // collect and check this level's puts
+ for _, req := range reqs {
+ tv, ok := req.Request.(*pb.RequestOp_RequestPut)
+ if !ok || tv.RequestPut == nil {
+ continue
+ }
+ k := string(tv.RequestPut.Key)
+ if _, ok := puts[k]; ok {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ if dels.Intersects(adt.NewStringAffinePoint(k)) {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ puts[k] = struct{}{}
+ }
+ return puts, dels, nil
+}
+
+func checkRequestOp(u *pb.RequestOp, maxTxnOps int) error {
+	// TODO: ensure only one of the fields is set.
+ switch uv := u.Request.(type) {
+ case *pb.RequestOp_RequestRange:
+ return checkRangeRequest(uv.RequestRange)
+ case *pb.RequestOp_RequestPut:
+ return checkPutRequest(uv.RequestPut)
+ case *pb.RequestOp_RequestDeleteRange:
+ return checkDeleteRequest(uv.RequestDeleteRange)
+ case *pb.RequestOp_RequestTxn:
+ return checkTxnRequest(uv.RequestTxn, maxTxnOps)
+ default:
+ // empty op / nil entry
+ return rpctypes.ErrGRPCKeyNotFound
+ }
+}
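
checkIntervals is what makes a txn with two puts to the same key fail server-side. A client-side sketch that should trip it (duplicateKeyTxn is an illustrative name):

```go
package main

import (
	"context"
	"fmt"

	"go.etcd.io/etcd/clientv3"
)

// duplicateKeyTxn is a sketch of a transaction that checkIntervals
// rejects: two puts to the same key inside one txn should surface
// rpctypes.ErrGRPCDuplicateKey to the client.
func duplicateKeyTxn(cli *clientv3.Client) {
	_, err := cli.Txn(context.TODO()).
		Then(clientv3.OpPut("k", "a"), clientv3.OpPut("k", "b")).
		Commit()
	fmt.Println(err) // expected: duplicate key error
}
```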
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/lease.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/lease.go
new file mode 100644
index 000000000000..7441beedf15e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/lease.go
@@ -0,0 +1,169 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "io"
+
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/lease"
+
+ "go.uber.org/zap"
+)
+
+type LeaseServer struct {
+ lg *zap.Logger
+ hdr header
+ le etcdserver.Lessor
+}
+
+func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
+ return &LeaseServer{lg: s.Cfg.Logger, le: s, hdr: newHeader(s)}
+}
+
+func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ resp, err := ls.le.LeaseGrant(ctx, cr)
+
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ ls.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ resp, err := ls.le.LeaseRevoke(ctx, rr)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ ls.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+ resp, err := ls.le.LeaseTimeToLive(ctx, rr)
+ if err != nil && err != lease.ErrLeaseNotFound {
+ return nil, togRPCError(err)
+ }
+ if err == lease.ErrLeaseNotFound {
+ resp = &pb.LeaseTimeToLiveResponse{
+ Header: &pb.ResponseHeader{},
+ ID: rr.ID,
+ TTL: -1,
+ }
+ }
+ ls.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (ls *LeaseServer) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
+ resp, err := ls.le.LeaseLeases(ctx, rr)
+ if err != nil && err != lease.ErrLeaseNotFound {
+ return nil, togRPCError(err)
+ }
+ if err == lease.ErrLeaseNotFound {
+ resp = &pb.LeaseLeasesResponse{
+ Header: &pb.ResponseHeader{},
+ Leases: []*pb.LeaseStatus{},
+ }
+ }
+ ls.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) {
+ errc := make(chan error, 1)
+ go func() {
+ errc <- ls.leaseKeepAlive(stream)
+ }()
+ select {
+ case err = <-errc:
+ case <-stream.Context().Done():
+ // the only server-side cancellation is noleader for now.
+ err = stream.Context().Err()
+ if err == context.Canceled {
+ err = rpctypes.ErrGRPCNoLeader
+ }
+ }
+ return err
+}
+
+func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
+ for {
+ req, err := stream.Recv()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ if isClientCtxErr(stream.Context().Err(), err) {
+ if ls.lg != nil {
+ ls.lg.Debug("failed to receive lease keepalive request from gRPC stream", zap.Error(err))
+ } else {
+ plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
+ }
+ } else {
+ if ls.lg != nil {
+ ls.lg.Warn("failed to receive lease keepalive request from gRPC stream", zap.Error(err))
+ } else {
+ plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
+ }
+ streamFailures.WithLabelValues("receive", "lease-keepalive").Inc()
+ }
+ return err
+ }
+
+		// Create the header before we send out the renew request.
+		// This ensures that the revision is less than or equal to the revision
+		// at which the keepalive happened at the local server (when the local server is the leader)
+		// or at the remote leader.
+ // Without this, a lease might be revoked at rev 3 but client can see the keepalive succeeded
+ // at rev 4.
+ resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}}
+ ls.hdr.fill(resp.Header)
+
+ ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID))
+ if err == lease.ErrLeaseNotFound {
+ err = nil
+ ttl = 0
+ }
+
+ if err != nil {
+ return togRPCError(err)
+ }
+
+ resp.TTL = ttl
+ err = stream.Send(resp)
+ if err != nil {
+ if isClientCtxErr(stream.Context().Err(), err) {
+ if ls.lg != nil {
+ ls.lg.Debug("failed to send lease keepalive response to gRPC stream", zap.Error(err))
+ } else {
+ plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
+ }
+ } else {
+ if ls.lg != nil {
+ ls.lg.Warn("failed to send lease keepalive response to gRPC stream", zap.Error(err))
+ } else {
+ plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
+ }
+ streamFailures.WithLabelValues("send", "lease-keepalive").Inc()
+ }
+ return err
+ }
+ }
+}
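
The client half of this keepalive stream is usually driven through clientv3. A hedged sketch granting a lease and consuming renewals (keepLeaseAlive is an illustrative name):

```go
package main

import (
	"context"
	"fmt"

	"go.etcd.io/etcd/clientv3"
)

// keepLeaseAlive is a sketch of the client side of the LeaseKeepAlive
// stream handled above: grant a lease, then consume renewal responses.
func keepLeaseAlive(cli *clientv3.Client) error {
	grant, err := cli.Grant(context.TODO(), 10) // 10s TTL
	if err != nil {
		return err
	}
	ch, err := cli.KeepAlive(context.TODO(), grant.ID)
	if err != nil {
		return err
	}
	for ka := range ch { // one response per renewal; a TTL of 0 means the lease is gone
		fmt.Println("renewed, TTL:", ka.TTL)
	}
	return nil
}
```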
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/maintenance.go
new file mode 100644
index 000000000000..8130adbf0abb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/maintenance.go
@@ -0,0 +1,313 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "crypto/sha256"
+ "io"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "go.etcd.io/etcd/auth"
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/mvcc"
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.etcd.io/etcd/raft"
+ "go.etcd.io/etcd/version"
+
+ "go.uber.org/zap"
+)
+
+type KVGetter interface {
+ KV() mvcc.ConsistentWatchableKV
+}
+
+type BackendGetter interface {
+ Backend() backend.Backend
+}
+
+type Alarmer interface {
+ // Alarms is implemented in Server interface located in etcdserver/server.go
+ // It returns a list of alarms present in the AlarmStore
+ Alarms() []*pb.AlarmMember
+ Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
+}
+
+type LeaderTransferrer interface {
+ MoveLeader(ctx context.Context, lead, target uint64) error
+}
+
+type AuthGetter interface {
+ AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error)
+ AuthStore() auth.AuthStore
+}
+
+type ClusterStatusGetter interface {
+ IsLearner() bool
+}
+
+type maintenanceServer struct {
+ lg *zap.Logger
+ rg etcdserver.RaftStatusGetter
+ kg KVGetter
+ bg BackendGetter
+ a Alarmer
+ lt LeaderTransferrer
+ hdr header
+ cs ClusterStatusGetter
+}
+
+func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
+ srv := &maintenanceServer{lg: s.Cfg.Logger, rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s), cs: s}
+ return &authMaintenanceServer{srv, s}
+}
+
+func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
+ if ms.lg != nil {
+ ms.lg.Info("starting defragment")
+ } else {
+ plog.Noticef("starting to defragment the storage backend...")
+ }
+ err := ms.bg.Backend().Defrag()
+ if err != nil {
+ if ms.lg != nil {
+ ms.lg.Warn("failed to defragment", zap.Error(err))
+ } else {
+ plog.Errorf("failed to defragment the storage backend (%v)", err)
+ }
+ return nil, err
+ }
+ if ms.lg != nil {
+ ms.lg.Info("finished defragment")
+ } else {
+ plog.Noticef("finished defragmenting the storage backend")
+ }
+ return &pb.DefragmentResponse{}, nil
+}
+
+// a buffer size big enough to hold more than one OS page
+const snapshotSendBufferSize = 32 * 1024
+
+func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
+ snap := ms.bg.Backend().Snapshot()
+ pr, pw := io.Pipe()
+
+ defer pr.Close()
+
+ go func() {
+ snap.WriteTo(pw)
+ if err := snap.Close(); err != nil {
+ if ms.lg != nil {
+ ms.lg.Warn("failed to close snapshot", zap.Error(err))
+ } else {
+ plog.Errorf("error closing snapshot (%v)", err)
+ }
+ }
+ pw.Close()
+ }()
+
+ // record SHA digest of snapshot data
+ // used for integrity checks during snapshot restore operation
+ h := sha256.New()
+
+	// the buffer holds the bytes read from the stream; the response size is
+	// a multiple of the OS page size as fetched from boltdb, e.g. 4*1024
+ buf := make([]byte, snapshotSendBufferSize)
+
+ sent := int64(0)
+ total := snap.Size()
+ size := humanize.Bytes(uint64(total))
+
+ start := time.Now()
+ if ms.lg != nil {
+ ms.lg.Info("sending database snapshot to client",
+ zap.Int64("total-bytes", total),
+ zap.String("size", size),
+ )
+ } else {
+ plog.Infof("sending database snapshot to client %s [%d bytes]", size, total)
+ }
+ for total-sent > 0 {
+ n, err := io.ReadFull(pr, buf)
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return togRPCError(err)
+ }
+ sent += int64(n)
+
+		// If total is an exact multiple of snapshotSendBufferSize, the last
+		// data chunk can have resp.RemainingBytes == 0 and a zero-length
+		// (but non-nil) resp.Blob. This does not cut the client off before
+		// the snapshot SHA arrives: the client keeps receiving non-nil
+		// responses until the server closes the stream with EOF.
+
+ resp := &pb.SnapshotResponse{
+ RemainingBytes: uint64(total - sent),
+ Blob: buf[:n],
+ }
+ if err = srv.Send(resp); err != nil {
+ return togRPCError(err)
+ }
+ h.Write(buf[:n])
+ }
+
+ // send SHA digest for integrity checks
+ // during snapshot restore operation
+ sha := h.Sum(nil)
+
+ if ms.lg != nil {
+ ms.lg.Info("sending database sha256 checksum to client",
+ zap.Int64("total-bytes", total),
+ zap.Int("checksum-size", len(sha)),
+ )
+ } else {
+ plog.Infof("sending database sha256 checksum to client [%d bytes]", len(sha))
+ }
+
+ hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha}
+ if err := srv.Send(hresp); err != nil {
+ return togRPCError(err)
+ }
+
+ if ms.lg != nil {
+ ms.lg.Info("successfully sent database snapshot to client",
+ zap.Int64("total-bytes", total),
+ zap.String("size", size),
+ zap.String("took", humanize.Time(start)),
+ )
+ } else {
+ plog.Infof("successfully sent database snapshot to client %s [%d bytes]", size, total)
+ }
+
+ return nil
+}
+
+func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
+ h, rev, err := ms.kg.KV().Hash()
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h}
+ ms.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (ms *maintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
+ h, rev, compactRev, err := ms.kg.KV().HashByRev(r.Revision)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h, CompactRevision: compactRev}
+ ms.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+ return ms.a.Alarm(ctx, ar)
+}
+
+func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
+ hdr := &pb.ResponseHeader{}
+ ms.hdr.fill(hdr)
+ resp := &pb.StatusResponse{
+ Header: hdr,
+ Version: version.Version,
+ Leader: uint64(ms.rg.Leader()),
+ RaftIndex: ms.rg.CommittedIndex(),
+ RaftAppliedIndex: ms.rg.AppliedIndex(),
+ RaftTerm: ms.rg.Term(),
+ DbSize: ms.bg.Backend().Size(),
+ DbSizeInUse: ms.bg.Backend().SizeInUse(),
+ IsLearner: ms.cs.IsLearner(),
+ }
+ if resp.Leader == raft.None {
+ resp.Errors = append(resp.Errors, etcdserver.ErrNoLeader.Error())
+ }
+ for _, a := range ms.a.Alarms() {
+ resp.Errors = append(resp.Errors, a.String())
+ }
+ return resp, nil
+}
+
+func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
+ if ms.rg.ID() != ms.rg.Leader() {
+ return nil, rpctypes.ErrGRPCNotLeader
+ }
+
+ if err := ms.lt.MoveLeader(ctx, uint64(ms.rg.Leader()), tr.TargetID); err != nil {
+ return nil, togRPCError(err)
+ }
+ return &pb.MoveLeaderResponse{}, nil
+}
+
+type authMaintenanceServer struct {
+ *maintenanceServer
+ ag AuthGetter
+}
+
+func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
+ authInfo, err := ams.ag.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return err
+ }
+
+ return ams.ag.AuthStore().IsAdminPermitted(authInfo)
+}
+
+func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
+ if err := ams.isAuthenticated(ctx); err != nil {
+ return nil, err
+ }
+
+ return ams.maintenanceServer.Defragment(ctx, sr)
+}
+
+func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
+ if err := ams.isAuthenticated(srv.Context()); err != nil {
+ return err
+ }
+
+ return ams.maintenanceServer.Snapshot(sr, srv)
+}
+
+func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
+ if err := ams.isAuthenticated(ctx); err != nil {
+ return nil, err
+ }
+
+ return ams.maintenanceServer.Hash(ctx, r)
+}
+
+func (ams *authMaintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
+ if err := ams.isAuthenticated(ctx); err != nil {
+ return nil, err
+ }
+ return ams.maintenanceServer.HashKV(ctx, r)
+}
+
+func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
+ return ams.maintenanceServer.Status(ctx, ar)
+}
+
+func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
+ return ams.maintenanceServer.MoveLeader(ctx, tr)
+}
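
On the receiving end, clientv3's Maintenance.Snapshot returns a reader over this stream: the database bytes followed by the sha256 trailer sent above. A sketch that saves it to disk (saveSnapshot is an illustrative name):

```go
package main

import (
	"context"
	"io"
	"os"

	"go.etcd.io/etcd/clientv3"
)

// saveSnapshot is a sketch of the receiving end of the Snapshot stream
// above: it copies the database bytes, including the appended sha256
// trailer, into a local file.
func saveSnapshot(cli *clientv3.Client, path string) error {
	rc, err := cli.Snapshot(context.TODO())
	if err != nil {
		return err
	}
	defer rc.Close()

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = io.Copy(f, rc)
	return err
}
```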
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/member.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/member.go
new file mode 100644
index 000000000000..b2ebc9898405
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/member.go
@@ -0,0 +1,119 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api"
+ "go.etcd.io/etcd/etcdserver/api/membership"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/pkg/types"
+)
+
+type ClusterServer struct {
+ cluster api.Cluster
+ server etcdserver.ServerV3
+}
+
+func NewClusterServer(s etcdserver.ServerV3) *ClusterServer {
+ return &ClusterServer{
+ cluster: s.Cluster(),
+ server: s,
+ }
+}
+
+func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
+ urls, err := types.NewURLs(r.PeerURLs)
+ if err != nil {
+ return nil, rpctypes.ErrGRPCMemberBadURLs
+ }
+
+ now := time.Now()
+ var m *membership.Member
+ if r.IsLearner {
+ m = membership.NewMemberAsLearner("", urls, "", &now)
+ } else {
+ m = membership.NewMember("", urls, "", &now)
+ }
+ membs, merr := cs.server.AddMember(ctx, *m)
+ if merr != nil {
+ return nil, togRPCError(merr)
+ }
+
+ return &pb.MemberAddResponse{
+ Header: cs.header(),
+ Member: &pb.Member{
+ ID: uint64(m.ID),
+ PeerURLs: m.PeerURLs,
+ IsLearner: m.IsLearner,
+ },
+ Members: membersToProtoMembers(membs),
+ }, nil
+}
+
+func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
+ membs, err := cs.server.RemoveMember(ctx, r.ID)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
+}
+
+func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
+ m := membership.Member{
+ ID: types.ID(r.ID),
+ RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
+ }
+ membs, err := cs.server.UpdateMember(ctx, m)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
+}
+
+func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) {
+ membs := membersToProtoMembers(cs.cluster.Members())
+ return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil
+}
+
+func (cs *ClusterServer) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error) {
+ membs, err := cs.server.PromoteMember(ctx, r.ID)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return &pb.MemberPromoteResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
+}
+
+func (cs *ClusterServer) header() *pb.ResponseHeader {
+ return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.server.Term()}
+}
+
+func membersToProtoMembers(membs []*membership.Member) []*pb.Member {
+ protoMembs := make([]*pb.Member, len(membs))
+ for i := range membs {
+ protoMembs[i] = &pb.Member{
+ Name: membs[i].Name,
+ ID: uint64(membs[i].ID),
+ PeerURLs: membs[i].PeerURLs,
+ ClientURLs: membs[i].ClientURLs,
+ IsLearner: membs[i].IsLearner,
+ }
+ }
+ return protoMembs
+}
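
A quick client-side counterpart to ClusterServer.MemberList, sketched with clientv3 (listMembers is an illustrative name):

```go
package main

import (
	"context"
	"fmt"

	"go.etcd.io/etcd/clientv3"
)

// listMembers is a sketch of the client call served by
// ClusterServer.MemberList above.
func listMembers(cli *clientv3.Client) error {
	resp, err := cli.MemberList(context.TODO())
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		fmt.Printf("%x %s learner=%v\n", m.ID, m.Name, m.IsLearner)
	}
	return nil
}
```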
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/metrics.go
new file mode 100644
index 000000000000..a4ee723c52f9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/metrics.go
@@ -0,0 +1,58 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "client_grpc_sent_bytes_total",
+ Help: "The total number of bytes sent to grpc clients.",
+ })
+
+ receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "client_grpc_received_bytes_total",
+ Help: "The total number of bytes received from grpc clients.",
+ })
+
+ streamFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "server_stream_failures_total",
+ Help: "The total number of stream failures from the local server.",
+ },
+ []string{"Type", "API"},
+ )
+
+ clientRequests = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "client_requests_total",
+ Help: "The total number of client requests per client version.",
+ },
+ []string{"type", "client_api_version"},
+ )
+)
+
+func init() {
+ prometheus.MustRegister(sentBytes)
+ prometheus.MustRegister(receivedBytes)
+ prometheus.MustRegister(streamFailures)
+ prometheus.MustRegister(clientRequests)
+}
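
These counters are registered with the default Prometheus registry, so any process embedding this package can expose them with a stock promhttp handler. A sketch with a placeholder address:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// Sketch: the counters registered in init() above land in the default
// Prometheus registry, so a plain promhttp handler serves them.
func main() {
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe("127.0.0.1:2112", nil)) // address is a placeholder
}
```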
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/quota.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/quota.go
new file mode 100644
index 000000000000..a145b8b0911e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/quota.go
@@ -0,0 +1,90 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/pkg/types"
+)
+
+type quotaKVServer struct {
+ pb.KVServer
+ qa quotaAlarmer
+}
+
+type quotaAlarmer struct {
+ q etcdserver.Quota
+ a Alarmer
+ id types.ID
+}
+
+// check whether the request satisfies the quota. If there is not enough
+// space, ignore the request and raise the NOSPACE (free space) alarm.
+func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error {
+ if qa.q.Available(r) {
+ return nil
+ }
+ req := &pb.AlarmRequest{
+ MemberID: uint64(qa.id),
+ Action: pb.AlarmRequest_ACTIVATE,
+ Alarm: pb.AlarmType_NOSPACE,
+ }
+ qa.a.Alarm(ctx, req)
+ return rpctypes.ErrGRPCNoSpace
+}
+
+func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer {
+ return "aKVServer{
+ NewKVServer(s),
+ quotaAlarmer{etcdserver.NewBackendQuota(s, "kv"), s, s.ID()},
+ }
+}
+
+func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+ if err := s.qa.check(ctx, r); err != nil {
+ return nil, err
+ }
+ return s.KVServer.Put(ctx, r)
+}
+
+func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+ if err := s.qa.check(ctx, r); err != nil {
+ return nil, err
+ }
+ return s.KVServer.Txn(ctx, r)
+}
+
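+// quotaLeaseServer wraps a LeaseServer and rejects LeaseGrant requests once
+// the backend quota is exhausted.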
+type quotaLeaseServer struct {
+ pb.LeaseServer
+ qa quotaAlarmer
+}
+
+func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ if err := s.qa.check(ctx, cr); err != nil {
+ return nil, err
+ }
+ return s.LeaseServer.LeaseGrant(ctx, cr)
+}
+
+func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
+	return &quotaLeaseServer{
+ NewLeaseServer(s),
+ quotaAlarmer{etcdserver.NewBackendQuota(s, "lease"), s, s.ID()},
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go
index 5c590e1aec99..90b8b835b168 100644
--- a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go
@@ -17,4 +17,6 @@ package rpctypes
var (
MetadataRequireLeaderKey = "hasleader"
MetadataHasLeader = "true"
+
+ MetadataClientAPIVersionKey = "client-api-version"
)
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/util.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/util.go
new file mode 100644
index 000000000000..281ddc7a0dbd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/util.go
@@ -0,0 +1,136 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "strings"
+
+ "go.etcd.io/etcd/auth"
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api/membership"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/lease"
+ "go.etcd.io/etcd/mvcc"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
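+// toGRPCErrorMap translates errors raised inside the etcd server into their
+// canonical gRPC equivalents defined in rpctypes.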
+var toGRPCErrorMap = map[error]error{
+ membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound,
+ membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound,
+ membership.ErrIDExists: rpctypes.ErrGRPCMemberExist,
+ membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist,
+ membership.ErrMemberNotLearner: rpctypes.ErrGRPCMemberNotLearner,
+ membership.ErrTooManyLearners: rpctypes.ErrGRPCTooManyLearners,
+ etcdserver.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted,
+ etcdserver.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady,
+
+ mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted,
+ mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev,
+ etcdserver.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge,
+ etcdserver.ErrNoSpace: rpctypes.ErrGRPCNoSpace,
+ etcdserver.ErrTooManyRequests: rpctypes.ErrTooManyRequests,
+
+ etcdserver.ErrNoLeader: rpctypes.ErrGRPCNoLeader,
+ etcdserver.ErrNotLeader: rpctypes.ErrGRPCNotLeader,
+ etcdserver.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged,
+ etcdserver.ErrStopped: rpctypes.ErrGRPCStopped,
+ etcdserver.ErrTimeout: rpctypes.ErrGRPCTimeout,
+ etcdserver.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail,
+ etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost,
+ etcdserver.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy,
+ etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound,
+ etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt,
+ etcdserver.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee,
+
+ lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound,
+ lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist,
+ lease.ErrLeaseTTLTooLarge: rpctypes.ErrGRPCLeaseTTLTooLarge,
+
+ auth.ErrRootUserNotExist: rpctypes.ErrGRPCRootUserNotExist,
+ auth.ErrRootRoleNotExist: rpctypes.ErrGRPCRootRoleNotExist,
+ auth.ErrUserAlreadyExist: rpctypes.ErrGRPCUserAlreadyExist,
+ auth.ErrUserEmpty: rpctypes.ErrGRPCUserEmpty,
+ auth.ErrUserNotFound: rpctypes.ErrGRPCUserNotFound,
+ auth.ErrRoleAlreadyExist: rpctypes.ErrGRPCRoleAlreadyExist,
+ auth.ErrRoleNotFound: rpctypes.ErrGRPCRoleNotFound,
+ auth.ErrRoleEmpty: rpctypes.ErrGRPCRoleEmpty,
+ auth.ErrAuthFailed: rpctypes.ErrGRPCAuthFailed,
+ auth.ErrPermissionDenied: rpctypes.ErrGRPCPermissionDenied,
+ auth.ErrRoleNotGranted: rpctypes.ErrGRPCRoleNotGranted,
+ auth.ErrPermissionNotGranted: rpctypes.ErrGRPCPermissionNotGranted,
+ auth.ErrAuthNotEnabled: rpctypes.ErrGRPCAuthNotEnabled,
+ auth.ErrInvalidAuthToken: rpctypes.ErrGRPCInvalidAuthToken,
+ auth.ErrInvalidAuthMgmt: rpctypes.ErrGRPCInvalidAuthMgmt,
+}
+
+func togRPCError(err error) error {
+ // let gRPC server convert to codes.Canceled, codes.DeadlineExceeded
+ if err == context.Canceled || err == context.DeadlineExceeded {
+ return err
+ }
+ grpcErr, ok := toGRPCErrorMap[err]
+ if !ok {
+ return status.Error(codes.Unknown, err.Error())
+ }
+ return grpcErr
+}
+
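+// isClientCtxErr reports whether an RPC failure was caused by the client,
+// i.e. by context cancellation, an exceeded deadline, or a disconnect.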
+func isClientCtxErr(ctxErr error, err error) bool {
+ if ctxErr != nil {
+ return true
+ }
+
+ ev, ok := status.FromError(err)
+ if !ok {
+ return false
+ }
+
+ switch ev.Code() {
+ case codes.Canceled, codes.DeadlineExceeded:
+ // client-side context cancel or deadline exceeded
+ // "rpc error: code = Canceled desc = context canceled"
+ // "rpc error: code = DeadlineExceeded desc = context deadline exceeded"
+ return true
+ case codes.Unavailable:
+ msg := ev.Message()
+ // client-side context cancel or deadline exceeded with TLS ("http2.errClientDisconnected")
+ // "rpc error: code = Unavailable desc = client disconnected"
+ if msg == "client disconnected" {
+ return true
+ }
+ // "grpc/transport.ClientTransport.CloseStream" on canceled streams
+ // "rpc error: code = Unavailable desc = stream error: stream ID 21; CANCEL")
+ if strings.HasPrefix(msg, "stream error: ") && strings.HasSuffix(msg, "; CANCEL") {
+ return true
+ }
+ }
+ return false
+}
+
+// in v3.4, a learner is allowed to serve serializable reads and endpoint status
+func isRPCSupportedForLearner(req interface{}) bool {
+ switch r := req.(type) {
+ case *pb.StatusRequest:
+ return true
+ case *pb.RangeRequest:
+ return r.Serializable
+ default:
+ return false
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/watch.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/watch.go
new file mode 100644
index 000000000000..dcc4cc637009
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/watch.go
@@ -0,0 +1,585 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "io"
+ "math/rand"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/auth"
+ "go.etcd.io/etcd/etcdserver"
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/mvcc"
+ "go.etcd.io/etcd/mvcc/mvccpb"
+
+ "go.uber.org/zap"
+)
+
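+// watchServer implements the pb.WatchServer gRPC service, streaming watch
+// events from the mvcc store to clients.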
+type watchServer struct {
+ lg *zap.Logger
+
+ clusterID int64
+ memberID int64
+
+ maxRequestBytes int
+
+ sg etcdserver.RaftStatusGetter
+ watchable mvcc.WatchableKV
+ ag AuthGetter
+}
+
+// NewWatchServer returns a new watch server.
+func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
+ return &watchServer{
+ lg: s.Cfg.Logger,
+
+ clusterID: int64(s.Cluster().ID()),
+ memberID: int64(s.ID()),
+
+ maxRequestBytes: int(s.Cfg.MaxRequestBytes + grpcOverheadBytes),
+
+ sg: s,
+ watchable: s.Watchable(),
+ ag: s,
+ }
+}
+
+var (
+	// External tests can read this with GetProgressReportInterval()
+	// and change it to a small value via SetProgressReportInterval()
+	// to finish fast.
+ progressReportInterval = 10 * time.Minute
+ progressReportIntervalMu sync.RWMutex
+)
+
+// GetProgressReportInterval returns the current progress report interval (for testing).
+func GetProgressReportInterval() time.Duration {
+ progressReportIntervalMu.RLock()
+ interval := progressReportInterval
+ progressReportIntervalMu.RUnlock()
+
+ // add rand(1/10*progressReportInterval) as jitter so that etcdserver will not
+ // send progress notifications to watchers around the same time even when watchers
+ // are created around the same time (which is common when a client restarts itself).
+ jitter := time.Duration(rand.Int63n(int64(interval) / 10))
+
+ return interval + jitter
+}
+
+// SetProgressReportInterval updates the current progress report interval (for testing).
+func SetProgressReportInterval(newTimeout time.Duration) {
+ progressReportIntervalMu.Lock()
+ progressReportInterval = newTimeout
+ progressReportIntervalMu.Unlock()
+}
+
+// We send ctrl responses inside the read loop. We do not want sends to
+// block reads, but we still want the ctrl responses we send to be
+// serialized. Thus we use a buffered chan to solve the problem. A small
+// buffer should be OK for most cases, since we expect ctrl requests to
+// be infrequent.
+const ctrlStreamBufLen = 16
+
+// serverWatchStream is an etcd server side stream. It receives requests
+// from the client-side gRPC stream, receives watch events from mvcc.WatchStream,
+// and creates responses that are forwarded to the gRPC stream.
+// It also forwards control messages like watch created and canceled.
+type serverWatchStream struct {
+ lg *zap.Logger
+
+ clusterID int64
+ memberID int64
+
+ maxRequestBytes int
+
+ sg etcdserver.RaftStatusGetter
+ watchable mvcc.WatchableKV
+ ag AuthGetter
+
+ gRPCStream pb.Watch_WatchServer
+ watchStream mvcc.WatchStream
+ ctrlStream chan *pb.WatchResponse
+
+ // mu protects progress, prevKV, fragment
+ mu sync.RWMutex
+ // tracks the watchID that stream might need to send progress to
+ // TODO: combine progress and prevKV into a single struct?
+ progress map[mvcc.WatchID]bool
+ // record watch IDs that need return previous key-value pair
+ prevKV map[mvcc.WatchID]bool
+ // records fragmented watch IDs
+ fragment map[mvcc.WatchID]bool
+
+ // closec indicates the stream is closed.
+ closec chan struct{}
+
+ // wg waits for the send loop to complete
+ wg sync.WaitGroup
+}
+
+func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
+ sws := serverWatchStream{
+ lg: ws.lg,
+
+ clusterID: ws.clusterID,
+ memberID: ws.memberID,
+
+ maxRequestBytes: ws.maxRequestBytes,
+
+ sg: ws.sg,
+ watchable: ws.watchable,
+ ag: ws.ag,
+
+ gRPCStream: stream,
+ watchStream: ws.watchable.NewWatchStream(),
+ // chan for sending control response like watcher created and canceled.
+ ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),
+
+ progress: make(map[mvcc.WatchID]bool),
+ prevKV: make(map[mvcc.WatchID]bool),
+ fragment: make(map[mvcc.WatchID]bool),
+
+ closec: make(chan struct{}),
+ }
+
+ sws.wg.Add(1)
+ go func() {
+ sws.sendLoop()
+ sws.wg.Done()
+ }()
+
+ errc := make(chan error, 1)
+ // Ideally recvLoop would also use sws.wg to signal its completion
+ // but when stream.Context().Done() is closed, the stream's recv
+ // may continue to block since it uses a different context, leading to
+ // deadlock when calling sws.close().
+ go func() {
+ if rerr := sws.recvLoop(); rerr != nil {
+ if isClientCtxErr(stream.Context().Err(), rerr) {
+ if sws.lg != nil {
+ sws.lg.Debug("failed to receive watch request from gRPC stream", zap.Error(rerr))
+ } else {
+ plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
+ }
+ } else {
+ if sws.lg != nil {
+ sws.lg.Warn("failed to receive watch request from gRPC stream", zap.Error(rerr))
+ } else {
+ plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
+ }
+ streamFailures.WithLabelValues("receive", "watch").Inc()
+ }
+ errc <- rerr
+ }
+ }()
+
+ select {
+ case err = <-errc:
+ close(sws.ctrlStream)
+
+ case <-stream.Context().Done():
+ err = stream.Context().Err()
+ // the only server-side cancellation is noleader for now.
+ if err == context.Canceled {
+ err = rpctypes.ErrGRPCNoLeader
+ }
+ }
+
+ sws.close()
+ return err
+}
+
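+// isWatchPermitted reports whether the stream's auth info permits ranging
+// over the requested watch key span.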
+func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
+ authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
+ if err != nil {
+ return false
+ }
+ if authInfo == nil {
+ // if auth is enabled, IsRangePermitted() can cause an error
+ authInfo = &auth.AuthInfo{}
+ }
+ return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
+}
+
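+// recvLoop reads watch requests from the gRPC stream and dispatches create,
+// cancel, and progress requests until the stream is closed.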
+func (sws *serverWatchStream) recvLoop() error {
+ for {
+ req, err := sws.gRPCStream.Recv()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ switch uv := req.RequestUnion.(type) {
+ case *pb.WatchRequest_CreateRequest:
+ if uv.CreateRequest == nil {
+ break
+ }
+
+ creq := uv.CreateRequest
+ if len(creq.Key) == 0 {
+ // \x00 is the smallest key
+ creq.Key = []byte{0}
+ }
+ if len(creq.RangeEnd) == 0 {
+ // force nil since watchstream.Watch distinguishes
+ // between nil and []byte{} for single key / >=
+ creq.RangeEnd = nil
+ }
+ if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
+ // support >= key queries
+ creq.RangeEnd = []byte{}
+ }
+
+ if !sws.isWatchPermitted(creq) {
+ wr := &pb.WatchResponse{
+ Header: sws.newResponseHeader(sws.watchStream.Rev()),
+ WatchId: creq.WatchId,
+ Canceled: true,
+ Created: true,
+ CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
+ }
+
+ select {
+ case sws.ctrlStream <- wr:
+ continue
+ case <-sws.closec:
+ return nil
+ }
+ }
+
+ filters := FiltersFromRequest(creq)
+
+ wsrev := sws.watchStream.Rev()
+ rev := creq.StartRevision
+ if rev == 0 {
+ rev = wsrev + 1
+ }
+ id, err := sws.watchStream.Watch(mvcc.WatchID(creq.WatchId), creq.Key, creq.RangeEnd, rev, filters...)
+ if err == nil {
+ sws.mu.Lock()
+ if creq.ProgressNotify {
+ sws.progress[id] = true
+ }
+ if creq.PrevKv {
+ sws.prevKV[id] = true
+ }
+ if creq.Fragment {
+ sws.fragment[id] = true
+ }
+ sws.mu.Unlock()
+ }
+ wr := &pb.WatchResponse{
+ Header: sws.newResponseHeader(wsrev),
+ WatchId: int64(id),
+ Created: true,
+ Canceled: err != nil,
+ }
+ if err != nil {
+ wr.CancelReason = err.Error()
+ }
+ select {
+ case sws.ctrlStream <- wr:
+ case <-sws.closec:
+ return nil
+ }
+
+ case *pb.WatchRequest_CancelRequest:
+ if uv.CancelRequest != nil {
+ id := uv.CancelRequest.WatchId
+ err := sws.watchStream.Cancel(mvcc.WatchID(id))
+ if err == nil {
+ sws.ctrlStream <- &pb.WatchResponse{
+ Header: sws.newResponseHeader(sws.watchStream.Rev()),
+ WatchId: id,
+ Canceled: true,
+ }
+ sws.mu.Lock()
+ delete(sws.progress, mvcc.WatchID(id))
+ delete(sws.prevKV, mvcc.WatchID(id))
+ delete(sws.fragment, mvcc.WatchID(id))
+ sws.mu.Unlock()
+ }
+ }
+ case *pb.WatchRequest_ProgressRequest:
+ if uv.ProgressRequest != nil {
+ sws.ctrlStream <- &pb.WatchResponse{
+ Header: sws.newResponseHeader(sws.watchStream.Rev()),
+ WatchId: -1, // response is not associated with any WatchId and will be broadcast to all watch channels
+ }
+ }
+ default:
+			// we probably should not shut down the entire stream when
+			// we receive an invalid command; just do nothing instead.
+ continue
+ }
+ }
+}
+
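+// sendLoop forwards watch events and control responses to the gRPC stream,
+// buffering events for watchers whose creation has not yet been announced.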
+func (sws *serverWatchStream) sendLoop() {
+ // watch ids that are currently active
+ ids := make(map[mvcc.WatchID]struct{})
+ // watch responses pending on a watch id creation message
+ pending := make(map[mvcc.WatchID][]*pb.WatchResponse)
+
+ interval := GetProgressReportInterval()
+ progressTicker := time.NewTicker(interval)
+
+ defer func() {
+ progressTicker.Stop()
+ // drain the chan to clean up pending events
+ for ws := range sws.watchStream.Chan() {
+ mvcc.ReportEventReceived(len(ws.Events))
+ }
+ for _, wrs := range pending {
+ for _, ws := range wrs {
+ mvcc.ReportEventReceived(len(ws.Events))
+ }
+ }
+ }()
+
+ for {
+ select {
+ case wresp, ok := <-sws.watchStream.Chan():
+ if !ok {
+ return
+ }
+
+ // TODO: evs is []mvccpb.Event type
+ // either return []*mvccpb.Event from the mvcc package
+ // or define protocol buffer with []mvccpb.Event.
+ evs := wresp.Events
+ events := make([]*mvccpb.Event, len(evs))
+ sws.mu.RLock()
+ needPrevKV := sws.prevKV[wresp.WatchID]
+ sws.mu.RUnlock()
+ for i := range evs {
+ events[i] = &evs[i]
+ if needPrevKV {
+ opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1}
+ r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt)
+ if err == nil && len(r.KVs) != 0 {
+ events[i].PrevKv = &(r.KVs[0])
+ }
+ }
+ }
+
+ canceled := wresp.CompactRevision != 0
+ wr := &pb.WatchResponse{
+ Header: sws.newResponseHeader(wresp.Revision),
+ WatchId: int64(wresp.WatchID),
+ Events: events,
+ CompactRevision: wresp.CompactRevision,
+ Canceled: canceled,
+ }
+
+ if _, okID := ids[wresp.WatchID]; !okID {
+ // buffer if id not yet announced
+ wrs := append(pending[wresp.WatchID], wr)
+ pending[wresp.WatchID] = wrs
+ continue
+ }
+
+ mvcc.ReportEventReceived(len(evs))
+
+ sws.mu.RLock()
+ fragmented, ok := sws.fragment[wresp.WatchID]
+ sws.mu.RUnlock()
+
+ var serr error
+ if !fragmented && !ok {
+ serr = sws.gRPCStream.Send(wr)
+ } else {
+ serr = sendFragments(wr, sws.maxRequestBytes, sws.gRPCStream.Send)
+ }
+
+ if serr != nil {
+ if isClientCtxErr(sws.gRPCStream.Context().Err(), serr) {
+ if sws.lg != nil {
+ sws.lg.Debug("failed to send watch response to gRPC stream", zap.Error(serr))
+ } else {
+ plog.Debugf("failed to send watch response to gRPC stream (%q)", serr.Error())
+ }
+ } else {
+ if sws.lg != nil {
+ sws.lg.Warn("failed to send watch response to gRPC stream", zap.Error(serr))
+ } else {
+ plog.Warningf("failed to send watch response to gRPC stream (%q)", serr.Error())
+ }
+ streamFailures.WithLabelValues("send", "watch").Inc()
+ }
+ return
+ }
+
+ sws.mu.Lock()
+ if len(evs) > 0 && sws.progress[wresp.WatchID] {
+ // elide next progress update if sent a key update
+ sws.progress[wresp.WatchID] = false
+ }
+ sws.mu.Unlock()
+
+ case c, ok := <-sws.ctrlStream:
+ if !ok {
+ return
+ }
+
+ if err := sws.gRPCStream.Send(c); err != nil {
+ if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+ if sws.lg != nil {
+ sws.lg.Debug("failed to send watch control response to gRPC stream", zap.Error(err))
+ } else {
+ plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error())
+ }
+ } else {
+ if sws.lg != nil {
+ sws.lg.Warn("failed to send watch control response to gRPC stream", zap.Error(err))
+ } else {
+ plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error())
+ }
+ streamFailures.WithLabelValues("send", "watch").Inc()
+ }
+ return
+ }
+
+ // track id creation
+ wid := mvcc.WatchID(c.WatchId)
+ if c.Canceled {
+ delete(ids, wid)
+ continue
+ }
+ if c.Created {
+ // flush buffered events
+ ids[wid] = struct{}{}
+ for _, v := range pending[wid] {
+ mvcc.ReportEventReceived(len(v.Events))
+ if err := sws.gRPCStream.Send(v); err != nil {
+ if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+ if sws.lg != nil {
+ sws.lg.Debug("failed to send pending watch response to gRPC stream", zap.Error(err))
+ } else {
+ plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error())
+ }
+ } else {
+ if sws.lg != nil {
+ sws.lg.Warn("failed to send pending watch response to gRPC stream", zap.Error(err))
+ } else {
+ plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error())
+ }
+ streamFailures.WithLabelValues("send", "watch").Inc()
+ }
+ return
+ }
+ }
+ delete(pending, wid)
+ }
+
+ case <-progressTicker.C:
+ sws.mu.Lock()
+ for id, ok := range sws.progress {
+ if ok {
+ sws.watchStream.RequestProgress(id)
+ }
+ sws.progress[id] = true
+ }
+ sws.mu.Unlock()
+
+ case <-sws.closec:
+ return
+ }
+ }
+}
+
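+// sendFragments splits a watch response that exceeds maxRequestBytes into
+// multiple fragment responses; all but the last carry Fragment=true.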
+func sendFragments(
+ wr *pb.WatchResponse,
+ maxRequestBytes int,
+ sendFunc func(*pb.WatchResponse) error) error {
+ // no need to fragment if total request size is smaller
+ // than max request limit or response contains only one event
+ if wr.Size() < maxRequestBytes || len(wr.Events) < 2 {
+ return sendFunc(wr)
+ }
+
+ ow := *wr
+ ow.Events = make([]*mvccpb.Event, 0)
+ ow.Fragment = true
+
+ var idx int
+ for {
+ cur := ow
+ for _, ev := range wr.Events[idx:] {
+ cur.Events = append(cur.Events, ev)
+ if len(cur.Events) > 1 && cur.Size() >= maxRequestBytes {
+ cur.Events = cur.Events[:len(cur.Events)-1]
+ break
+ }
+ idx++
+ }
+ if idx == len(wr.Events) {
+ // last response has no more fragment
+ cur.Fragment = false
+ }
+ if err := sendFunc(&cur); err != nil {
+ return err
+ }
+ if !cur.Fragment {
+ break
+ }
+ }
+ return nil
+}
+
+func (sws *serverWatchStream) close() {
+ sws.watchStream.Close()
+ close(sws.closec)
+ sws.wg.Wait()
+}
+
+func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
+ return &pb.ResponseHeader{
+ ClusterId: uint64(sws.clusterID),
+ MemberId: uint64(sws.memberID),
+ Revision: rev,
+ RaftTerm: sws.sg.Term(),
+ }
+}
+
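+// filterNoDelete and filterNoPut report whether an event should be dropped
+// when the NODELETE or NOPUT filter, respectively, is requested.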
+func filterNoDelete(e mvccpb.Event) bool {
+ return e.Type == mvccpb.DELETE
+}
+
+func filterNoPut(e mvccpb.Event) bool {
+ return e.Type == mvccpb.PUT
+}
+
+// FiltersFromRequest returns "mvcc.FilterFunc" from a given watch create request.
+func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
+ filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
+ for _, ft := range creq.Filters {
+ switch ft {
+ case pb.WatchCreateRequest_NOPUT:
+ filters = append(filters, filterNoPut)
+ case pb.WatchCreateRequest_NODELETE:
+ filters = append(filters, filterNoDelete)
+ default:
+ }
+ }
+ return filters
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/apply.go b/vendor/go.etcd.io/etcd/etcdserver/apply.go
new file mode 100644
index 000000000000..d98549dea5b0
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/apply.go
@@ -0,0 +1,1040 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "sort"
+ "time"
+
+ "go.etcd.io/etcd/auth"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/lease"
+ "go.etcd.io/etcd/mvcc"
+ "go.etcd.io/etcd/mvcc/mvccpb"
+ "go.etcd.io/etcd/pkg/traceutil"
+ "go.etcd.io/etcd/pkg/types"
+
+ "github.com/gogo/protobuf/proto"
+ "go.uber.org/zap"
+)
+
+const (
+ warnApplyDuration = 100 * time.Millisecond
+)
+
+type applyResult struct {
+ resp proto.Message
+ err error
+	// physc signals that the physical effect of the request has completed in
+	// addition to being logically reflected by the node. Currently only used
+	// for Compaction requests.
+ physc <-chan struct{}
+ trace *traceutil.Trace
+}
+
+// applierV3 is the interface for processing V3 raft messages
+type applierV3 interface {
+ Apply(r *pb.InternalRaftRequest) *applyResult
+
+ Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error)
+ Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
+ DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
+ Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error)
+ Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error)
+
+ LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
+ LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
+
+ LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error)
+
+ Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error)
+
+ Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error)
+
+ AuthEnable() (*pb.AuthEnableResponse, error)
+ AuthDisable() (*pb.AuthDisableResponse, error)
+
+ UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+ UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+ UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+ UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+ UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+ UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+ RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+ RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+ RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+ RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+ RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+ UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+ RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+}
+
+type checkReqFunc func(mvcc.ReadView, *pb.RequestOp) error
+
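+// applierV3backend is the default applierV3 implementation, applying raft
+// entries directly against the server's mvcc, lease, alarm, and auth stores.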
+type applierV3backend struct {
+ s *EtcdServer
+
+ checkPut checkReqFunc
+ checkRange checkReqFunc
+}
+
+func (s *EtcdServer) newApplierV3Backend() applierV3 {
+ base := &applierV3backend{s: s}
+ base.checkPut = func(rv mvcc.ReadView, req *pb.RequestOp) error {
+ return base.checkRequestPut(rv, req)
+ }
+ base.checkRange = func(rv mvcc.ReadView, req *pb.RequestOp) error {
+ return base.checkRequestRange(rv, req)
+ }
+ return base
+}
+
+func (s *EtcdServer) newApplierV3() applierV3 {
+ return newAuthApplierV3(
+ s.AuthStore(),
+ newQuotaApplierV3(s, s.newApplierV3Backend()),
+ s.lessor,
+ )
+}
+
+func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
+ ar := &applyResult{}
+ defer func(start time.Time) {
+ warnOfExpensiveRequest(a.s.getLogger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
+ if ar.err != nil {
+ warnOfFailedRequest(a.s.getLogger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
+ }
+ }(time.Now())
+
+ // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
+ switch {
+ case r.Range != nil:
+ ar.resp, ar.err = a.s.applyV3.Range(context.TODO(), nil, r.Range)
+ case r.Put != nil:
+ ar.resp, ar.trace, ar.err = a.s.applyV3.Put(nil, r.Put)
+ case r.DeleteRange != nil:
+ ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange)
+ case r.Txn != nil:
+ ar.resp, ar.err = a.s.applyV3.Txn(r.Txn)
+ case r.Compaction != nil:
+ ar.resp, ar.physc, ar.trace, ar.err = a.s.applyV3.Compaction(r.Compaction)
+ case r.LeaseGrant != nil:
+ ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant)
+ case r.LeaseRevoke != nil:
+ ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke)
+ case r.LeaseCheckpoint != nil:
+ ar.resp, ar.err = a.s.applyV3.LeaseCheckpoint(r.LeaseCheckpoint)
+ case r.Alarm != nil:
+ ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm)
+ case r.Authenticate != nil:
+ ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate)
+ case r.AuthEnable != nil:
+ ar.resp, ar.err = a.s.applyV3.AuthEnable()
+ case r.AuthDisable != nil:
+ ar.resp, ar.err = a.s.applyV3.AuthDisable()
+ case r.AuthUserAdd != nil:
+ ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd)
+ case r.AuthUserDelete != nil:
+ ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete)
+ case r.AuthUserChangePassword != nil:
+ ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword)
+ case r.AuthUserGrantRole != nil:
+ ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole)
+ case r.AuthUserGet != nil:
+ ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet)
+ case r.AuthUserRevokeRole != nil:
+ ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole)
+ case r.AuthRoleAdd != nil:
+ ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd)
+ case r.AuthRoleGrantPermission != nil:
+ ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission)
+ case r.AuthRoleGet != nil:
+ ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet)
+ case r.AuthRoleRevokePermission != nil:
+ ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission)
+ case r.AuthRoleDelete != nil:
+ ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete)
+ case r.AuthUserList != nil:
+ ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList)
+ case r.AuthRoleList != nil:
+ ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList)
+ default:
+ panic("not implemented")
+ }
+ return ar
+}
+
+func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) {
+ resp = &pb.PutResponse{}
+ resp.Header = &pb.ResponseHeader{}
+ trace = traceutil.New("put",
+ a.s.getLogger(),
+ traceutil.Field{Key: "key", Value: string(p.Key)},
+ traceutil.Field{Key: "req_size", Value: proto.Size(p)},
+ )
+ val, leaseID := p.Value, lease.LeaseID(p.Lease)
+ if txn == nil {
+ if leaseID != lease.NoLease {
+ if l := a.s.lessor.Lookup(leaseID); l == nil {
+ return nil, nil, lease.ErrLeaseNotFound
+ }
+ }
+ txn = a.s.KV().Write(trace)
+ defer txn.End()
+ }
+
+ var rr *mvcc.RangeResult
+ if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
+ trace.DisableStep()
+ rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{})
+ if err != nil {
+ return nil, nil, err
+ }
+ trace.EnableStep()
+ trace.Step("get previous kv pair")
+ }
+ if p.IgnoreValue || p.IgnoreLease {
+ if rr == nil || len(rr.KVs) == 0 {
+ // ignore_{lease,value} flag expects previous key-value pair
+ return nil, nil, ErrKeyNotFound
+ }
+ }
+ if p.IgnoreValue {
+ val = rr.KVs[0].Value
+ }
+ if p.IgnoreLease {
+ leaseID = lease.LeaseID(rr.KVs[0].Lease)
+ }
+ if p.PrevKv {
+ if rr != nil && len(rr.KVs) != 0 {
+ resp.PrevKv = &rr.KVs[0]
+ }
+ }
+
+ resp.Header.Revision = txn.Put(p.Key, val, leaseID)
+ trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision})
+ return resp, trace, nil
+}
+
+func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ resp := &pb.DeleteRangeResponse{}
+ resp.Header = &pb.ResponseHeader{}
+ end := mkGteRange(dr.RangeEnd)
+
+ if txn == nil {
+ txn = a.s.kv.Write(traceutil.TODO())
+ defer txn.End()
+ }
+
+ if dr.PrevKv {
+ rr, err := txn.Range(dr.Key, end, mvcc.RangeOptions{})
+ if err != nil {
+ return nil, err
+ }
+ if rr != nil {
+ resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs))
+ for i := range rr.KVs {
+ resp.PrevKvs[i] = &rr.KVs[i]
+ }
+ }
+ }
+
+ resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, end)
+ return resp, nil
+}
+
+func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+ trace := traceutil.Get(ctx)
+
+ resp := &pb.RangeResponse{}
+ resp.Header = &pb.ResponseHeader{}
+
+ if txn == nil {
+ txn = a.s.kv.Read(trace)
+ defer txn.End()
+ }
+
+ limit := r.Limit
+ if r.SortOrder != pb.RangeRequest_NONE ||
+ r.MinModRevision != 0 || r.MaxModRevision != 0 ||
+ r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
+ // fetch everything; sort and truncate afterwards
+ limit = 0
+ }
+ if limit > 0 {
+ // fetch one extra for 'more' flag
+ limit = limit + 1
+ }
+
+ ro := mvcc.RangeOptions{
+ Limit: limit,
+ Rev: r.Revision,
+ Count: r.CountOnly,
+ }
+
+ rr, err := txn.Range(r.Key, mkGteRange(r.RangeEnd), ro)
+ if err != nil {
+ return nil, err
+ }
+
+ if r.MaxModRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
+ pruneKVs(rr, f)
+ }
+ if r.MinModRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
+ pruneKVs(rr, f)
+ }
+ if r.MaxCreateRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
+ pruneKVs(rr, f)
+ }
+ if r.MinCreateRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
+ pruneKVs(rr, f)
+ }
+
+ sortOrder := r.SortOrder
+ if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
+		// Since the current mvcc.Range implementation returns results
+		// sorted by key in lexicographically ascending order, sort
+		// ASCEND by default only when the target is not 'KEY'.
+ sortOrder = pb.RangeRequest_ASCEND
+ }
+ if sortOrder != pb.RangeRequest_NONE {
+ var sorter sort.Interface
+ switch {
+ case r.SortTarget == pb.RangeRequest_KEY:
+ sorter = &kvSortByKey{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_VERSION:
+ sorter = &kvSortByVersion{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_CREATE:
+ sorter = &kvSortByCreate{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_MOD:
+ sorter = &kvSortByMod{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_VALUE:
+ sorter = &kvSortByValue{&kvSort{rr.KVs}}
+ }
+ switch {
+ case sortOrder == pb.RangeRequest_ASCEND:
+ sort.Sort(sorter)
+ case sortOrder == pb.RangeRequest_DESCEND:
+ sort.Sort(sort.Reverse(sorter))
+ }
+ }
+
+ if r.Limit > 0 && len(rr.KVs) > int(r.Limit) {
+ rr.KVs = rr.KVs[:r.Limit]
+ resp.More = true
+ }
+ trace.Step("filter and sort the key-value pairs")
+ resp.Header.Revision = rr.Rev
+ resp.Count = int64(rr.Count)
+ resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs))
+ for i := range rr.KVs {
+ if r.KeysOnly {
+ rr.KVs[i].Value = nil
+ }
+ resp.Kvs[i] = &rr.KVs[i]
+ }
+ trace.Step("assemble the response")
+ return resp, nil
+}
+
+func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
+ isWrite := !isTxnReadonly(rt)
+ txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read(traceutil.TODO()))
+
+ txnPath := compareToPath(txn, rt)
+ if isWrite {
+ if _, err := checkRequests(txn, rt, txnPath, a.checkPut); err != nil {
+ txn.End()
+ return nil, err
+ }
+ }
+ if _, err := checkRequests(txn, rt, txnPath, a.checkRange); err != nil {
+ txn.End()
+ return nil, err
+ }
+
+ txnResp, _ := newTxnResp(rt, txnPath)
+
+ // When executing mutable txn ops, etcd must hold the txn lock so
+ // readers do not see any intermediate results. Since writes are
+ // serialized on the raft loop, the revision in the read view will
+ // be the revision of the write txn.
+ if isWrite {
+ txn.End()
+ txn = a.s.KV().Write(traceutil.TODO())
+ }
+ a.applyTxn(txn, rt, txnPath, txnResp)
+ rev := txn.Rev()
+ if len(txn.Changes()) != 0 {
+ rev++
+ }
+ txn.End()
+
+ txnResp.Header.Revision = rev
+ return txnResp, nil
+}
+
+// newTxnResp allocates a txn response for a txn request given a path.
+func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) {
+ reqs := rt.Success
+ if !txnPath[0] {
+ reqs = rt.Failure
+ }
+ resps := make([]*pb.ResponseOp, len(reqs))
+ txnResp = &pb.TxnResponse{
+ Responses: resps,
+ Succeeded: txnPath[0],
+ Header: &pb.ResponseHeader{},
+ }
+ for i, req := range reqs {
+ switch tv := req.Request.(type) {
+ case *pb.RequestOp_RequestRange:
+ resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}}
+ case *pb.RequestOp_RequestPut:
+ resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}}
+ case *pb.RequestOp_RequestDeleteRange:
+ resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}}
+ case *pb.RequestOp_RequestTxn:
+ resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:])
+ resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}}
+ txnPath = txnPath[1+txns:]
+ txnCount += txns + 1
+ default:
+ }
+ }
+ return txnResp, txnCount
+}
+
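+// compareToPath evaluates the compare guards of a txn (and of any nested
+// txns, depth first) and records which branch each txn will take.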
+func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool {
+ txnPath := make([]bool, 1)
+ ops := rt.Success
+ if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] {
+ ops = rt.Failure
+ }
+ for _, op := range ops {
+ tv, ok := op.Request.(*pb.RequestOp_RequestTxn)
+ if !ok || tv.RequestTxn == nil {
+ continue
+ }
+ txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...)
+ }
+ return txnPath
+}
+
+func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool {
+ for _, c := range cmps {
+ if !applyCompare(rv, c) {
+ return false
+ }
+ }
+ return true
+}
+
+// applyCompare applies the compare request.
+// It returns true if the comparison succeeds and false otherwise.
+func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
+ // TODO: possible optimizations
+ // * chunk reads for large ranges to conserve memory
+ // * rewrite rules for common patterns:
+ // ex. "[a, b) createrev > 0" => "limit 1 /\ kvs > 0"
+ // * caching
+ rr, err := rv.Range(c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{})
+ if err != nil {
+ return false
+ }
+ if len(rr.KVs) == 0 {
+ if c.Target == pb.Compare_VALUE {
+ // Always fail if comparing a value on a key/keys that doesn't exist;
+ // nil == empty string in grpc; no way to represent missing value
+ return false
+ }
+ return compareKV(c, mvccpb.KeyValue{})
+ }
+ for _, kv := range rr.KVs {
+ if !compareKV(c, kv) {
+ return false
+ }
+ }
+ return true
+}
+
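+// compareKV checks a single key-value pair against the target and result of
+// a compare request.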
+func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool {
+ var result int
+ rev := int64(0)
+ switch c.Target {
+ case pb.Compare_VALUE:
+ v := []byte{}
+ if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil {
+ v = tv.Value
+ }
+ result = bytes.Compare(ckv.Value, v)
+ case pb.Compare_CREATE:
+ if tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision); tv != nil {
+ rev = tv.CreateRevision
+ }
+ result = compareInt64(ckv.CreateRevision, rev)
+ case pb.Compare_MOD:
+ if tv, _ := c.TargetUnion.(*pb.Compare_ModRevision); tv != nil {
+ rev = tv.ModRevision
+ }
+ result = compareInt64(ckv.ModRevision, rev)
+ case pb.Compare_VERSION:
+ if tv, _ := c.TargetUnion.(*pb.Compare_Version); tv != nil {
+ rev = tv.Version
+ }
+ result = compareInt64(ckv.Version, rev)
+ case pb.Compare_LEASE:
+ if tv, _ := c.TargetUnion.(*pb.Compare_Lease); tv != nil {
+ rev = tv.Lease
+ }
+ result = compareInt64(ckv.Lease, rev)
+ }
+ switch c.Result {
+ case pb.Compare_EQUAL:
+ return result == 0
+ case pb.Compare_NOT_EQUAL:
+ return result != 0
+ case pb.Compare_GREATER:
+ return result > 0
+ case pb.Compare_LESS:
+ return result < 0
+ }
+ return true
+}
+
+func (a *applierV3backend) applyTxn(txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int) {
+ reqs := rt.Success
+ if !txnPath[0] {
+ reqs = rt.Failure
+ }
+
+ lg := a.s.getLogger()
+ for i, req := range reqs {
+ respi := tresp.Responses[i].Response
+ switch tv := req.Request.(type) {
+ case *pb.RequestOp_RequestRange:
+ resp, err := a.Range(context.TODO(), txn, tv.RequestRange)
+ if err != nil {
+ if lg != nil {
+ lg.Panic("unexpected error during txn", zap.Error(err))
+ } else {
+ plog.Panicf("unexpected error during txn: %v", err)
+ }
+ }
+ respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp
+ case *pb.RequestOp_RequestPut:
+ resp, _, err := a.Put(txn, tv.RequestPut)
+ if err != nil {
+ if lg != nil {
+ lg.Panic("unexpected error during txn", zap.Error(err))
+ } else {
+ plog.Panicf("unexpected error during txn: %v", err)
+ }
+ }
+ respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp
+ case *pb.RequestOp_RequestDeleteRange:
+ resp, err := a.DeleteRange(txn, tv.RequestDeleteRange)
+ if err != nil {
+ if lg != nil {
+ lg.Panic("unexpected error during txn", zap.Error(err))
+ } else {
+ plog.Panicf("unexpected error during txn: %v", err)
+ }
+ }
+ respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp
+ case *pb.RequestOp_RequestTxn:
+ resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn
+ applyTxns := a.applyTxn(txn, tv.RequestTxn, txnPath[1:], resp)
+ txns += applyTxns + 1
+ txnPath = txnPath[applyTxns+1:]
+ default:
+ // empty union
+ }
+ }
+ return txns
+}
+
+func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
+ resp := &pb.CompactionResponse{}
+ resp.Header = &pb.ResponseHeader{}
+ trace := traceutil.New("compact",
+ a.s.getLogger(),
+ traceutil.Field{Key: "revision", Value: compaction.Revision},
+ )
+
+ ch, err := a.s.KV().Compact(trace, compaction.Revision)
+ if err != nil {
+ return nil, ch, nil, err
+ }
+	// Get the current revision; which key is ranged does not matter.
+ rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{})
+ resp.Header.Revision = rr.Rev
+ return resp, ch, trace, err
+}
+
+func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL)
+ resp := &pb.LeaseGrantResponse{}
+ if err == nil {
+ resp.ID = int64(l.ID)
+ resp.TTL = l.TTL()
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ err := a.s.lessor.Revoke(lease.LeaseID(lc.ID))
+ return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err
+}
+
+func (a *applierV3backend) LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) {
+ for _, c := range lc.Checkpoints {
+ err := a.s.lessor.Checkpoint(lease.LeaseID(c.ID), c.Remaining_TTL)
+ if err != nil {
+ return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, err
+ }
+ }
+ return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, nil
+}
+
+func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+ resp := &pb.AlarmResponse{}
+ oldCount := len(a.s.alarmStore.Get(ar.Alarm))
+
+ lg := a.s.getLogger()
+ switch ar.Action {
+ case pb.AlarmRequest_GET:
+ resp.Alarms = a.s.alarmStore.Get(ar.Alarm)
+ case pb.AlarmRequest_ACTIVATE:
+ m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm)
+ if m == nil {
+ break
+ }
+ resp.Alarms = append(resp.Alarms, m)
+ activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1
+ if !activated {
+ break
+ }
+
+ if lg != nil {
+ lg.Warn("alarm raised", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String()))
+ } else {
+ plog.Warningf("alarm %v raised by peer %s", m.Alarm, types.ID(m.MemberID))
+ }
+ switch m.Alarm {
+ case pb.AlarmType_CORRUPT:
+ a.s.applyV3 = newApplierV3Corrupt(a)
+ case pb.AlarmType_NOSPACE:
+ a.s.applyV3 = newApplierV3Capped(a)
+ default:
+ if lg != nil {
+ lg.Warn("unimplemented alarm activation", zap.String("alarm", fmt.Sprintf("%+v", m)))
+ } else {
+ plog.Errorf("unimplemented alarm activation (%+v)", m)
+ }
+ }
+ case pb.AlarmRequest_DEACTIVATE:
+ m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm)
+ if m == nil {
+ break
+ }
+ resp.Alarms = append(resp.Alarms, m)
+ deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0
+ if !deactivated {
+ break
+ }
+
+ switch m.Alarm {
+ case pb.AlarmType_NOSPACE, pb.AlarmType_CORRUPT:
+ // TODO: check kv hash before deactivating CORRUPT?
+ if lg != nil {
+ lg.Warn("alarm disarmed", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String()))
+ } else {
+ plog.Infof("alarm disarmed %+v", ar)
+ }
+ a.s.applyV3 = a.s.newApplierV3()
+ default:
+ if lg != nil {
+ lg.Warn("unimplemented alarm deactivation", zap.String("alarm", fmt.Sprintf("%+v", m)))
+ } else {
+ plog.Errorf("unimplemented alarm deactivation (%+v)", m)
+ }
+ }
+ default:
+ return nil, nil
+ }
+ return resp, nil
+}
+
+type applierV3Capped struct {
+ applierV3
+ q backendQuota
+}
+
+// newApplierV3Capped creates an applierV3 that rejects Puts and transactions
+// containing Puts so that the number of keys in the store is capped.
+func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }
+
+func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ return nil, nil, ErrNoSpace
+}
+
+func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) {
+ if a.q.Cost(r) > 0 {
+ return nil, ErrNoSpace
+ }
+ return a.applierV3.Txn(r)
+}
+
+func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ return nil, ErrNoSpace
+}
+
+func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
+ err := a.s.AuthStore().AuthEnable()
+ if err != nil {
+ return nil, err
+ }
+ return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil
+}
+
+func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
+ a.s.AuthStore().AuthDisable()
+ return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil
+}
+
+func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
+ ctx := context.WithValue(context.WithValue(a.s.ctx, auth.AuthenticateParamIndex{}, a.s.consistIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken)
+ resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ resp, err := a.s.AuthStore().UserAdd(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ resp, err := a.s.AuthStore().UserDelete(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ resp, err := a.s.AuthStore().UserChangePassword(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ resp, err := a.s.AuthStore().UserGrantRole(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ resp, err := a.s.AuthStore().UserGet(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ resp, err := a.s.AuthStore().UserRevokeRole(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ resp, err := a.s.AuthStore().RoleAdd(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ resp, err := a.s.AuthStore().RoleGrantPermission(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ resp, err := a.s.AuthStore().RoleGet(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ resp, err := a.s.AuthStore().RoleRevokePermission(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ resp, err := a.s.AuthStore().RoleDelete(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ resp, err := a.s.AuthStore().UserList(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ resp, err := a.s.AuthStore().RoleList(r)
+ if resp != nil {
+ resp.Header = newHeader(a.s)
+ }
+ return resp, err
+}
+
+type quotaApplierV3 struct {
+ applierV3
+ q Quota
+}
+
+func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
+	return &quotaApplierV3{app, NewBackendQuota(s, "v3-applier")}
+}
+
+func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ ok := a.q.Available(p)
+ resp, trace, err := a.applierV3.Put(txn, p)
+ if err == nil && !ok {
+ err = ErrNoSpace
+ }
+ return resp, trace, err
+}
+
+func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
+ ok := a.q.Available(rt)
+ resp, err := a.applierV3.Txn(rt)
+ if err == nil && !ok {
+ err = ErrNoSpace
+ }
+ return resp, err
+}
+
+func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ ok := a.q.Available(lc)
+ resp, err := a.applierV3.LeaseGrant(lc)
+ if err == nil && !ok {
+ err = ErrNoSpace
+ }
+ return resp, err
+}
+
+type kvSort struct{ kvs []mvccpb.KeyValue }
+
+func (s *kvSort) Swap(i, j int) {
+ t := s.kvs[i]
+ s.kvs[i] = s.kvs[j]
+ s.kvs[j] = t
+}
+func (s *kvSort) Len() int { return len(s.kvs) }
+
+type kvSortByKey struct{ *kvSort }
+
+func (s *kvSortByKey) Less(i, j int) bool {
+ return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0
+}
+
+type kvSortByVersion struct{ *kvSort }
+
+func (s *kvSortByVersion) Less(i, j int) bool {
+ return (s.kvs[i].Version - s.kvs[j].Version) < 0
+}
+
+type kvSortByCreate struct{ *kvSort }
+
+func (s *kvSortByCreate) Less(i, j int) bool {
+ return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0
+}
+
+type kvSortByMod struct{ *kvSort }
+
+func (s *kvSortByMod) Less(i, j int) bool {
+ return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0
+}
+
+type kvSortByValue struct{ *kvSort }
+
+func (s *kvSortByValue) Less(i, j int) bool {
+ return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
+}
+
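+// checkRequests walks the requests along the chosen txn path and applies f
+// to each non-txn op, returning the number of nested txns visited.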
+func checkRequests(rv mvcc.ReadView, rt *pb.TxnRequest, txnPath []bool, f checkReqFunc) (int, error) {
+ txnCount := 0
+ reqs := rt.Success
+ if !txnPath[0] {
+ reqs = rt.Failure
+ }
+ for _, req := range reqs {
+ if tv, ok := req.Request.(*pb.RequestOp_RequestTxn); ok && tv.RequestTxn != nil {
+ txns, err := checkRequests(rv, tv.RequestTxn, txnPath[1:], f)
+ if err != nil {
+ return 0, err
+ }
+ txnCount += txns + 1
+ txnPath = txnPath[txns+1:]
+ continue
+ }
+ if err := f(rv, req); err != nil {
+ return 0, err
+ }
+ }
+ return txnCount, nil
+}
+
+func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqOp *pb.RequestOp) error {
+ tv, ok := reqOp.Request.(*pb.RequestOp_RequestPut)
+ if !ok || tv.RequestPut == nil {
+ return nil
+ }
+ req := tv.RequestPut
+ if req.IgnoreValue || req.IgnoreLease {
+		// expects a previous key-value pair; error if it does not exist
+ rr, err := rv.Range(req.Key, nil, mvcc.RangeOptions{})
+ if err != nil {
+ return err
+ }
+ if rr == nil || len(rr.KVs) == 0 {
+ return ErrKeyNotFound
+ }
+ }
+ if lease.LeaseID(req.Lease) != lease.NoLease {
+ if l := a.s.lessor.Lookup(lease.LeaseID(req.Lease)); l == nil {
+ return lease.ErrLeaseNotFound
+ }
+ }
+ return nil
+}
+
+func (a *applierV3backend) checkRequestRange(rv mvcc.ReadView, reqOp *pb.RequestOp) error {
+ tv, ok := reqOp.Request.(*pb.RequestOp_RequestRange)
+ if !ok || tv.RequestRange == nil {
+ return nil
+ }
+ req := tv.RequestRange
+ switch {
+ case req.Revision == 0:
+ return nil
+ case req.Revision > rv.Rev():
+ return mvcc.ErrFutureRev
+ case req.Revision < rv.FirstRev():
+ return mvcc.ErrCompacted
+ }
+ return nil
+}
+
+func compareInt64(a, b int64) int {
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+}
+
+// mkGteRange determines if the range end is a >= range. This works around grpc
+// sending empty byte strings as nil; >= is encoded in the range end as '\0'.
+// If it is a GTE range, then []byte{} is returned to indicate the empty byte
+// string (vs nil being no byte string).
+func mkGteRange(rangeEnd []byte) []byte {
+ if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
+ return []byte{}
+ }
+ return rangeEnd
+}
+
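+// noSideEffect reports whether a request only reads state and has no side
+// effects.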
+func noSideEffect(r *pb.InternalRaftRequest) bool {
+ return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil
+}
+
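+// removeNeedlessRangeReqs strips read-only range ops from both txn branches,
+// compacting the remaining ops in place.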
+func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
+ f := func(ops []*pb.RequestOp) []*pb.RequestOp {
+ j := 0
+ for i := 0; i < len(ops); i++ {
+ if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
+ continue
+ }
+ ops[j] = ops[i]
+ j++
+ }
+
+ return ops[:j]
+ }
+
+ txn.Success = f(txn.Success)
+ txn.Failure = f(txn.Failure)
+}
+
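+// pruneKVs filters rr.KVs in place, dropping every entry for which
+// isPrunable returns true.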
+func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
+ j := 0
+ for i := range rr.KVs {
+ rr.KVs[j] = rr.KVs[i]
+ if !isPrunable(&rr.KVs[i]) {
+ j++
+ }
+ }
+ rr.KVs = rr.KVs[:j]
+}
+
+func newHeader(s *EtcdServer) *pb.ResponseHeader {
+ return &pb.ResponseHeader{
+ ClusterId: uint64(s.Cluster().ID()),
+ MemberId: uint64(s.ID()),
+ Revision: s.KV().Rev(),
+ RaftTerm: s.Term(),
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/apply_auth.go b/vendor/go.etcd.io/etcd/etcdserver/apply_auth.go
new file mode 100644
index 000000000000..269af4758cd4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/apply_auth.go
@@ -0,0 +1,244 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+ "sync"
+
+ "go.etcd.io/etcd/auth"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/lease"
+ "go.etcd.io/etcd/mvcc"
+ "go.etcd.io/etcd/pkg/traceutil"
+)
+
+type authApplierV3 struct {
+ applierV3
+ as auth.AuthStore
+ lessor lease.Lessor
+
+	// mu serializes Apply so that authInfo isn't corrupted and so that
+	// serialized requests don't leak data from TOCTOU errors.
+ mu sync.Mutex
+
+ authInfo auth.AuthInfo
+}
+
+func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *authApplierV3 {
+ return &authApplierV3{applierV3: base, as: as, lessor: lessor}
+}
+
+func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
+ aa.mu.Lock()
+ defer aa.mu.Unlock()
+ if r.Header != nil {
+ // backward-compatible with pre-3.0 releases when internalRaftRequest
+ // does not have header field
+ aa.authInfo.Username = r.Header.Username
+ aa.authInfo.Revision = r.Header.AuthRevision
+ }
+ if needAdminPermission(r) {
+ if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return &applyResult{err: err}
+ }
+ }
+ ret := aa.applierV3.Apply(r)
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return ret
+}
+
+func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
+ return nil, nil, err
+ }
+
+ if err := aa.checkLeasePuts(lease.LeaseID(r.Lease)); err != nil {
+		// The specified lease is already attached to a key that cannot
+		// be written by this user. Since the user cannot revoke the
+		// lease, attaching it to the newly written key must also be
+		// forbidden.
+ return nil, nil, err
+ }
+
+ if r.PrevKv {
+ err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ return aa.applierV3.Put(txn, r)
+}
+
+func (aa *authApplierV3) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+ if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+ return nil, err
+ }
+ return aa.applierV3.Range(ctx, txn, r)
+}
+
+func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+ return nil, err
+ }
+ if r.PrevKv {
+ err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return aa.applierV3.DeleteRange(txn, r)
+}
+
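+// checkTxnReqsPermission verifies that the caller holds the permissions
+// required by every op in a txn branch.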
+func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
+ for _, requ := range reqs {
+ switch tv := requ.Request.(type) {
+ case *pb.RequestOp_RequestRange:
+ if tv.RequestRange == nil {
+ continue
+ }
+
+ if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
+ return err
+ }
+
+ case *pb.RequestOp_RequestPut:
+ if tv.RequestPut == nil {
+ continue
+ }
+
+ if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
+ return err
+ }
+
+ case *pb.RequestOp_RequestDeleteRange:
+ if tv.RequestDeleteRange == nil {
+ continue
+ }
+
+ if tv.RequestDeleteRange.PrevKv {
+ err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+ if err != nil {
+ return err
+ }
+ }
+
+ err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
+ for _, c := range rt.Compare {
+ if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil {
+ return err
+ }
+ }
+ if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
+ return err
+ }
+ return checkTxnReqsPermission(as, ai, rt.Failure)
+}
+
+func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
+ if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
+ return nil, err
+ }
+ return aa.applierV3.Txn(rt)
+}
+
+func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ if err := aa.checkLeasePuts(lease.LeaseID(lc.ID)); err != nil {
+ return nil, err
+ }
+ return aa.applierV3.LeaseRevoke(lc)
+}
+
+func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error {
+ lease := aa.lessor.Lookup(leaseID)
+ if lease != nil {
+ for _, key := range lease.Keys() {
+ if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
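+
+// Worked example (illustrative, placeholder key and lease ID): if lease 0x1
+// is attached to key "/secret" and the caller lacks write permission on
+// "/secret", checkLeasePuts returns a permission error, so the caller can
+// neither revoke lease 0x1 nor attach it to newly written keys.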
+
+func (aa *authApplierV3) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ err := aa.as.IsAdminPermitted(&aa.authInfo)
+ if err != nil && r.Name != aa.authInfo.Username {
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return &pb.AuthUserGetResponse{}, err
+ }
+
+ return aa.applierV3.UserGet(r)
+}
+
+func (aa *authApplierV3) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ err := aa.as.IsAdminPermitted(&aa.authInfo)
+ if err != nil && !aa.as.HasRole(aa.authInfo.Username, r.Role) {
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return &pb.AuthRoleGetResponse{}, err
+ }
+
+ return aa.applierV3.RoleGet(r)
+}
+
+func needAdminPermission(r *pb.InternalRaftRequest) bool {
+ switch {
+ case r.AuthEnable != nil:
+ return true
+ case r.AuthDisable != nil:
+ return true
+ case r.AuthUserAdd != nil:
+ return true
+ case r.AuthUserDelete != nil:
+ return true
+ case r.AuthUserChangePassword != nil:
+ return true
+ case r.AuthUserGrantRole != nil:
+ return true
+ case r.AuthUserRevokeRole != nil:
+ return true
+ case r.AuthRoleAdd != nil:
+ return true
+ case r.AuthRoleGrantPermission != nil:
+ return true
+ case r.AuthRoleRevokePermission != nil:
+ return true
+ case r.AuthRoleDelete != nil:
+ return true
+ case r.AuthUserList != nil:
+ return true
+ case r.AuthRoleList != nil:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/apply_v2.go b/vendor/go.etcd.io/etcd/etcdserver/apply_v2.go
new file mode 100644
index 000000000000..c77df1970617
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/apply_v2.go
@@ -0,0 +1,147 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "encoding/json"
+ "path"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api"
+ "go.etcd.io/etcd/etcdserver/api/membership"
+ "go.etcd.io/etcd/etcdserver/api/v2store"
+ "go.etcd.io/etcd/pkg/pbutil"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+)
+
+// ApplierV2 is the interface for processing V2 raft messages
+type ApplierV2 interface {
+ Delete(r *RequestV2) Response
+ Post(r *RequestV2) Response
+ Put(r *RequestV2) Response
+ QGet(r *RequestV2) Response
+ Sync(r *RequestV2) Response
+}
+
+func NewApplierV2(lg *zap.Logger, s v2store.Store, c *membership.RaftCluster) ApplierV2 {
+ return &applierV2store{lg: lg, store: s, cluster: c}
+}
+
+type applierV2store struct {
+ lg *zap.Logger
+ store v2store.Store
+ cluster *membership.RaftCluster
+}
+
+func (a *applierV2store) Delete(r *RequestV2) Response {
+ switch {
+ case r.PrevIndex > 0 || r.PrevValue != "":
+ return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
+ default:
+ return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive))
+ }
+}
+
+func (a *applierV2store) Post(r *RequestV2) Response {
+ return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, r.TTLOptions()))
+}
+
+func (a *applierV2store) Put(r *RequestV2) Response {
+ ttlOptions := r.TTLOptions()
+ exists, existsSet := pbutil.GetBool(r.PrevExist)
+ switch {
+ case existsSet:
+ if exists {
+ if r.PrevIndex == 0 && r.PrevValue == "" {
+ return toResponse(a.store.Update(r.Path, r.Val, ttlOptions))
+ }
+ return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
+ }
+ return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions))
+ case r.PrevIndex > 0 || r.PrevValue != "":
+ return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
+ default:
+ if storeMemberAttributeRegexp.MatchString(r.Path) {
+ id := membership.MustParseMemberIDFromKey(path.Dir(r.Path))
+ var attr membership.Attributes
+ if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
+ if a.lg != nil {
+ a.lg.Panic("failed to unmarshal", zap.String("value", r.Val), zap.Error(err))
+ } else {
+ plog.Panicf("unmarshal %s should never fail: %v", r.Val, err)
+ }
+ }
+ if a.cluster != nil {
+ a.cluster.UpdateAttributes(id, attr)
+ }
+ // return an empty response since there is no consumer.
+ return Response{}
+ }
+ if r.Path == membership.StoreClusterVersionKey() {
+ if a.cluster != nil {
+ a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability)
+ }
+ // return an empty response since there is no consumer.
+ return Response{}
+ }
+ return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions))
+ }
+}
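+
+// Dispatch summary (illustrative restatement of the cases above): a PUT with
+// PrevExist=true and no PrevIndex/PrevValue becomes an Update; a PUT naming
+// PrevIndex or PrevValue becomes a CompareAndSwap; PrevExist=false becomes a
+// Create; otherwise the request is a plain Set, with special handling for
+// member-attribute and cluster-version keys.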
+
+func (a *applierV2store) QGet(r *RequestV2) Response {
+ return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted))
+}
+
+func (a *applierV2store) Sync(r *RequestV2) Response {
+ a.store.DeleteExpiredKeys(time.Unix(0, r.Time))
+ return Response{}
+}
+
+// applyV2Request interprets r as a call to v2store.X
+// and returns a Response interpreted from v2store.Event
+func (s *EtcdServer) applyV2Request(r *RequestV2) Response {
+ defer warnOfExpensiveRequest(s.getLogger(), time.Now(), r, nil, nil)
+
+ switch r.Method {
+ case "POST":
+ return s.applyV2.Post(r)
+ case "PUT":
+ return s.applyV2.Put(r)
+ case "DELETE":
+ return s.applyV2.Delete(r)
+ case "QGET":
+ return s.applyV2.QGet(r)
+ case "SYNC":
+ return s.applyV2.Sync(r)
+ default:
+ // This should never be reached, but just in case:
+ return Response{Err: ErrUnknownMethod}
+ }
+}
+
+func (r *RequestV2) TTLOptions() v2store.TTLOptionSet {
+ refresh, _ := pbutil.GetBool(r.Refresh)
+ ttlOptions := v2store.TTLOptionSet{Refresh: refresh}
+ if r.Expiration != 0 {
+ ttlOptions.ExpireTime = time.Unix(0, r.Expiration)
+ }
+ return ttlOptions
+}
+
+func toResponse(ev *v2store.Event, err error) Response {
+ return Response{Event: ev, Err: err}
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/backend.go b/vendor/go.etcd.io/etcd/etcdserver/backend.go
new file mode 100644
index 000000000000..3eace1a33c69
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/backend.go
@@ -0,0 +1,112 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/snap"
+ "go.etcd.io/etcd/lease"
+ "go.etcd.io/etcd/mvcc"
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.etcd.io/etcd/raft/raftpb"
+
+ "go.uber.org/zap"
+)
+
+func newBackend(cfg ServerConfig) backend.Backend {
+ bcfg := backend.DefaultBackendConfig()
+ bcfg.Path = cfg.backendPath()
+ if cfg.BackendBatchLimit != 0 {
+ bcfg.BatchLimit = cfg.BackendBatchLimit
+ if cfg.Logger != nil {
+ cfg.Logger.Info("setting backend batch limit", zap.Int("batch limit", cfg.BackendBatchLimit))
+ }
+ }
+ if cfg.BackendBatchInterval != 0 {
+ bcfg.BatchInterval = cfg.BackendBatchInterval
+ if cfg.Logger != nil {
+ cfg.Logger.Info("setting backend batch interval", zap.Duration("batch interval", cfg.BackendBatchInterval))
+ }
+ }
+ bcfg.BackendFreelistType = cfg.BackendFreelistType
+ bcfg.Logger = cfg.Logger
+ if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
+ // permit 10% excess over quota for disarm
+ bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
+ }
+ return backend.New(bcfg)
+}
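+
+// Worked example (illustrative): with QuotaBackendBytes set to 8 GiB
+// (8589934592 bytes), MmapSize becomes 8589934592 + 858993459 bytes,
+// roughly 8.8 GiB: the 10% excess over quota permitted for disarm.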
+
+// openSnapshotBackend renames a snapshot db to the current etcd db and opens it.
+func openSnapshotBackend(cfg ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) {
+ snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find database snapshot file (%v)", err)
+ }
+ if err := os.Rename(snapPath, cfg.backendPath()); err != nil {
+ return nil, fmt.Errorf("failed to rename database snapshot file (%v)", err)
+ }
+ return openBackend(cfg), nil
+}
+
+// openBackend returns a backend using the current etcd db.
+func openBackend(cfg ServerConfig) backend.Backend {
+ fn := cfg.backendPath()
+
+ now, beOpened := time.Now(), make(chan backend.Backend)
+ go func() {
+ beOpened <- newBackend(cfg)
+ }()
+
+ select {
+ case be := <-beOpened:
+ if cfg.Logger != nil {
+ cfg.Logger.Info("opened backend db", zap.String("path", fn), zap.Duration("took", time.Since(now)))
+ }
+ return be
+
+ case <-time.After(10 * time.Second):
+ if cfg.Logger != nil {
+ cfg.Logger.Info(
+ "db file is flocked by another process, or taking too long",
+ zap.String("path", fn),
+ zap.Duration("took", time.Since(now)),
+ )
+ } else {
+ plog.Warningf("another etcd process is using %q and holds the file lock, or loading backend file is taking >10 seconds", fn)
+ plog.Warningf("waiting for it to exit before starting...")
+ }
+ }
+
+ return <-beOpened
+}
+
+// recoverSnapshotBackend recovers the DB from a snapshot in case etcd crashes
+// before updating the backend db after persisting raft snapshot to disk,
+// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
+// case, replace the db with the snapshot db sent by the leader.
+func recoverSnapshotBackend(cfg ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) {
+ var cIndex consistentIndex
+ kv := mvcc.New(cfg.Logger, oldbe, &lease.FakeLessor{}, nil, &cIndex, mvcc.StoreConfig{CompactionBatchLimit: cfg.CompactionBatchLimit})
+ defer kv.Close()
+ if snapshot.Metadata.Index <= kv.ConsistentIndex() {
+ return oldbe, nil
+ }
+ oldbe.Close()
+ return openSnapshotBackend(cfg, snap.New(cfg.Logger, cfg.SnapDir()), snapshot)
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/cluster_util.go b/vendor/go.etcd.io/etcd/etcdserver/cluster_util.go
new file mode 100644
index 000000000000..f92706cb7a14
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/cluster_util.go
@@ -0,0 +1,407 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "sort"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/membership"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/version"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+)
+
+// isMemberBootstrapped tries to check if the given member has been bootstrapped
+// in the given cluster.
+func isMemberBootstrapped(lg *zap.Logger, cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool {
+ rcl, err := getClusterFromRemotePeers(lg, getRemotePeerURLs(cl, member), timeout, false, rt)
+ if err != nil {
+ return false
+ }
+ id := cl.MemberByName(member).ID
+ m := rcl.Member(id)
+ if m == nil {
+ return false
+ }
+ if len(m.ClientURLs) > 0 {
+ return true
+ }
+ return false
+}
+
+// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and
+// attempts to construct a Cluster by accessing the members endpoint on one of
+// these URLs. The first URL to provide a response is used. If no URLs provide
+// a response, or a Cluster cannot be successfully created from a received
+// response, an error is returned.
+// Each request has a 10-second timeout. Because the upper limit of TTL is 5s,
+// 10 seconds is enough to build the connection and finish the request.
+func GetClusterFromRemotePeers(lg *zap.Logger, urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) {
+ return getClusterFromRemotePeers(lg, urls, 10*time.Second, true, rt)
+}
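+
+// Illustrative usage (placeholder URL and transport):
+//
+//	cl, err := GetClusterFromRemotePeers(lg, []string{"https://10.0.0.1:2380"}, rt)
+//
+// builds a RaftCluster from the first listed peer whose /members endpoint
+// responds with a valid member list.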
+
+// If logerr is true, it prints out more error messages.
+func getClusterFromRemotePeers(lg *zap.Logger, urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) {
+ cc := &http.Client{
+ Transport: rt,
+ Timeout: timeout,
+ }
+ for _, u := range urls {
+ addr := u + "/members"
+ resp, err := cc.Get(addr)
+ if err != nil {
+ if logerr {
+ if lg != nil {
+ lg.Warn("failed to get cluster response", zap.String("address", addr), zap.Error(err))
+ } else {
+ plog.Warningf("could not get cluster response from %s: %v", u, err)
+ }
+ }
+ continue
+ }
+ b, err := ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ if logerr {
+ if lg != nil {
+ lg.Warn("failed to read body of cluster response", zap.String("address", addr), zap.Error(err))
+ } else {
+ plog.Warningf("could not read the body of cluster response: %v", err)
+ }
+ }
+ continue
+ }
+ var membs []*membership.Member
+ if err = json.Unmarshal(b, &membs); err != nil {
+ if logerr {
+ if lg != nil {
+ lg.Warn("failed to unmarshal cluster response", zap.String("address", addr), zap.Error(err))
+ } else {
+ plog.Warningf("could not unmarshal cluster response: %v", err)
+ }
+ }
+ continue
+ }
+ id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
+ if err != nil {
+ if logerr {
+ if lg != nil {
+ lg.Warn(
+ "failed to parse cluster ID",
+ zap.String("address", addr),
+ zap.String("header", resp.Header.Get("X-Etcd-Cluster-ID")),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("could not parse the cluster ID from cluster res: %v", err)
+ }
+ }
+ continue
+ }
+
+		// Check the membership list in the response: if members are present,
+		// build and return the raft cluster; otherwise the resulting cluster
+		// would be an invalid empty cluster, so return an error instead.
+ if len(membs) > 0 {
+ return membership.NewClusterFromMembers(lg, "", id, membs), nil
+ }
+ return nil, fmt.Errorf("failed to get raft cluster member(s) from the given URLs")
+ }
+ return nil, fmt.Errorf("could not retrieve cluster information from the given URLs")
+}
+
+// getRemotePeerURLs returns peer urls of remote members in the cluster. The
+// returned list is sorted in ascending lexicographical order.
+func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string {
+ us := make([]string, 0)
+ for _, m := range cl.Members() {
+ if m.Name == local {
+ continue
+ }
+ us = append(us, m.PeerURLs...)
+ }
+ sort.Strings(us)
+ return us
+}
+
+// getVersions returns the versions of the members in the given cluster.
+// The key of the returned map is the member's ID. The value of the returned map
+// is the semver versions string, including server and cluster.
+// If it fails to get the version of a member, the value will be nil.
+func getVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions {
+ members := cl.Members()
+ vers := make(map[string]*version.Versions)
+ for _, m := range members {
+ if m.ID == local {
+ cv := "not_decided"
+ if cl.Version() != nil {
+ cv = cl.Version().String()
+ }
+ vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv}
+ continue
+ }
+ ver, err := getVersion(lg, m, rt)
+ if err != nil {
+ if lg != nil {
+ lg.Warn("failed to get version", zap.String("remote-member-id", m.ID.String()), zap.Error(err))
+ } else {
+ plog.Warningf("cannot get the version of member %s (%v)", m.ID, err)
+ }
+ vers[m.ID.String()] = nil
+ } else {
+ vers[m.ID.String()] = ver
+ }
+ }
+ return vers
+}
+
+// decideClusterVersion decides the cluster version based on the versions map.
+// The returned version is the min server version in the map, or nil if the min
+// version is unknown.
+func decideClusterVersion(lg *zap.Logger, vers map[string]*version.Versions) *semver.Version {
+ var cv *semver.Version
+ lv := semver.Must(semver.NewVersion(version.Version))
+
+ for mid, ver := range vers {
+ if ver == nil {
+ return nil
+ }
+ v, err := semver.NewVersion(ver.Server)
+ if err != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to parse server version of remote member",
+ zap.String("remote-member-id", mid),
+ zap.String("remote-member-version", ver.Server),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("cannot understand the version of member %s (%v)", mid, err)
+ }
+ return nil
+ }
+ if lv.LessThan(*v) {
+ if lg != nil {
+ lg.Warn(
+ "leader found higher-versioned member",
+ zap.String("local-member-version", lv.String()),
+ zap.String("remote-member-id", mid),
+ zap.String("remote-member-version", ver.Server),
+ )
+ } else {
+ plog.Warningf("the local etcd version %s is not up-to-date", lv.String())
+ plog.Warningf("member %s has a higher version %s", mid, ver.Server)
+ }
+ }
+ if cv == nil {
+ cv = v
+ } else if v.LessThan(*cv) {
+ cv = v
+ }
+ }
+ return cv
+}
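+
+// Worked example (illustrative): for vers = {A: server 3.4.0, B: server 3.3.0}
+// the decided cluster version is 3.3.0; if any member's versions entry is nil
+// (its version could not be fetched), the result is nil and no decision is made.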
+
+// isCompatibleWithCluster returns true if the local member has a version
+// compatible with the currently running cluster.
+// The version is considered compatible when at least one of the other members
+// in the cluster has a cluster version in the range [MinClusterVersion,
+// Version] and no known member has a cluster version out of that range.
+// We use this rule because another member might be offline when the local
+// member joins.
+func isCompatibleWithCluster(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool {
+ vers := getVersions(lg, cl, local, rt)
+ minV := semver.Must(semver.NewVersion(version.MinClusterVersion))
+ maxV := semver.Must(semver.NewVersion(version.Version))
+ maxV = &semver.Version{
+ Major: maxV.Major,
+ Minor: maxV.Minor,
+ }
+ return isCompatibleWithVers(lg, vers, local, minV, maxV)
+}
+
+func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool {
+ var ok bool
+ for id, v := range vers {
+ // ignore comparison with local version
+ if id == local.String() {
+ continue
+ }
+ if v == nil {
+ continue
+ }
+ clusterv, err := semver.NewVersion(v.Cluster)
+ if err != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to parse cluster version of remote member",
+ zap.String("remote-member-id", id),
+ zap.String("remote-member-cluster-version", v.Cluster),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err)
+ }
+ continue
+ }
+ if clusterv.LessThan(*minV) {
+ if lg != nil {
+ lg.Warn(
+ "cluster version of remote member is not compatible; too low",
+ zap.String("remote-member-id", id),
+ zap.String("remote-member-cluster-version", clusterv.String()),
+ zap.String("minimum-cluster-version-supported", minV.String()),
+ )
+ } else {
+ plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String())
+ }
+ return false
+ }
+ if maxV.LessThan(*clusterv) {
+ if lg != nil {
+ lg.Warn(
+ "cluster version of remote member is not compatible; too high",
+ zap.String("remote-member-id", id),
+ zap.String("remote-member-cluster-version", clusterv.String()),
+					zap.String("maximum-cluster-version-supported", maxV.String()),
+ )
+ } else {
+ plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String())
+ }
+ return false
+ }
+ ok = true
+ }
+ return ok
+}
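+
+// Worked example (illustrative, assuming etcd's MinClusterVersion of "3.0.0"
+// and a local Version of 3.4.x): maxV is truncated to 3.4.0, so a remote
+// cluster version of 3.3.0 is compatible while 3.5.0, or anything below
+// 3.0.0, is not.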
+
+// getVersion returns the Versions of the given member via its
+// peerURLs. Returns the last error if it fails to get the version.
+func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper) (*version.Versions, error) {
+ cc := &http.Client{
+ Transport: rt,
+ }
+ var (
+ err error
+ resp *http.Response
+ )
+
+ for _, u := range m.PeerURLs {
+ addr := u + "/version"
+ resp, err = cc.Get(addr)
+ if err != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to reach the peer URL",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err)
+ }
+ continue
+ }
+ var b []byte
+ b, err = ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to read body of response",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err)
+ }
+ continue
+ }
+ var vers version.Versions
+ if err = json.Unmarshal(b, &vers); err != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to unmarshal response",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err)
+ }
+ continue
+ }
+ return &vers, nil
+ }
+ return nil, err
+}
+
+func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.RoundTripper) ([]*membership.Member, error) {
+ cc := &http.Client{Transport: peerRt}
+ // TODO: refactor member http handler code
+ // cannot import etcdhttp, so manually construct url
+ requestUrl := url + "/members/promote/" + fmt.Sprintf("%d", id)
+ req, err := http.NewRequest("POST", requestUrl, nil)
+ if err != nil {
+ return nil, err
+ }
+ req = req.WithContext(ctx)
+ resp, err := cc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode == http.StatusRequestTimeout {
+ return nil, ErrTimeout
+ }
+ if resp.StatusCode == http.StatusPreconditionFailed {
+ // both ErrMemberNotLearner and ErrLearnerNotReady have same http status code
+ if strings.Contains(string(b), ErrLearnerNotReady.Error()) {
+ return nil, ErrLearnerNotReady
+ }
+ if strings.Contains(string(b), membership.ErrMemberNotLearner.Error()) {
+ return nil, membership.ErrMemberNotLearner
+ }
+ return nil, fmt.Errorf("member promote: unknown error(%s)", string(b))
+ }
+ if resp.StatusCode == http.StatusNotFound {
+ return nil, membership.ErrIDNotFound
+ }
+
+ if resp.StatusCode != http.StatusOK { // all other types of errors
+ return nil, fmt.Errorf("member promote: unknown error(%s)", string(b))
+ }
+
+ var membs []*membership.Member
+ if err := json.Unmarshal(b, &membs); err != nil {
+ return nil, err
+ }
+ return membs, nil
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/config.go b/vendor/go.etcd.io/etcd/etcdserver/config.go
new file mode 100644
index 000000000000..88cd721c3259
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/config.go
@@ -0,0 +1,307 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/pkg/netutil"
+ "go.etcd.io/etcd/pkg/transport"
+ "go.etcd.io/etcd/pkg/types"
+
+ bolt "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// ServerConfig holds the configuration of etcd as taken from the command line or discovery.
+type ServerConfig struct {
+ Name string
+ DiscoveryURL string
+ DiscoveryProxy string
+ ClientURLs types.URLs
+ PeerURLs types.URLs
+ DataDir string
+	// DedicatedWALDir makes etcd write the WAL to WALDir
+	// rather than to dataDir/member/wal.
+ DedicatedWALDir string
+
+ SnapshotCount uint64
+
+ // SnapshotCatchUpEntries is the number of entries for a slow follower
+ // to catch-up after compacting the raft storage entries.
+	// We expect the follower to have millisecond-level latency to the leader.
+	// The max throughput is around 10K. Keeping 5K entries is enough to help
+	// a follower catch up.
+ // WARNING: only change this for tests. Always use "DefaultSnapshotCatchUpEntries"
+ SnapshotCatchUpEntries uint64
+
+ MaxSnapFiles uint
+ MaxWALFiles uint
+
+	// BackendBatchInterval is the maximum time before committing the backend transaction.
+ BackendBatchInterval time.Duration
+	// BackendBatchLimit is the maximum number of operations before committing the backend transaction.
+ BackendBatchLimit int
+
+ // BackendFreelistType is the type of the backend boltdb freelist.
+ BackendFreelistType bolt.FreelistType
+
+ InitialPeerURLsMap types.URLsMap
+ InitialClusterToken string
+ NewCluster bool
+ PeerTLSInfo transport.TLSInfo
+
+ CORS map[string]struct{}
+
+ // HostWhitelist lists acceptable hostnames from client requests.
+	// If the server is insecure (no TLS), it only accepts requests
+	// whose Host header value exists in this whitelist.
+ HostWhitelist map[string]struct{}
+
+ TickMs uint
+ ElectionTicks int
+
+	// If InitialElectionTickAdvance is true, the local member fast-forwards
+	// election ticks to speed up the "initial" leader election trigger. This
+	// benefits the case of larger election ticks. For instance, a cross
+	// datacenter deployment may require a longer election timeout of 10
+	// seconds. If true, the local node does not need to wait up to 10 seconds.
+	// Instead, it forwards its election ticks to 8 seconds, leaving only 2
+	// seconds before leader election.
+ //
+ // Major assumptions are that:
+ // - cluster has no active leader thus advancing ticks enables faster
+ // leader election, or
+ // - cluster already has an established leader, and rejoining follower
+ // is likely to receive heartbeats from the leader after tick advance
+ // and before election timeout.
+ //
+	// However, when the network from the leader to a rejoining follower is
+	// congested and the follower does not receive a leader heartbeat within
+	// the remaining election ticks, a disruptive election has to happen,
+	// affecting cluster availability.
+ //
+ // Disabling this would slow down initial bootstrap process for cross
+ // datacenter deployments. Make your own tradeoffs by configuring
+ // --initial-election-tick-advance at the cost of slow initial bootstrap.
+ //
+ // If single-node, it advances ticks regardless.
+ //
+ // See https://github.com/etcd-io/etcd/issues/9333 for more detail.
+ InitialElectionTickAdvance bool
+
+ BootstrapTimeout time.Duration
+
+ AutoCompactionRetention time.Duration
+ AutoCompactionMode string
+ CompactionBatchLimit int
+ QuotaBackendBytes int64
+ MaxTxnOps uint
+
+ // MaxRequestBytes is the maximum request size to send over raft.
+ MaxRequestBytes uint
+
+ StrictReconfigCheck bool
+
+ // ClientCertAuthEnabled is true when cert has been signed by the client CA.
+ ClientCertAuthEnabled bool
+
+ AuthToken string
+ BcryptCost uint
+
+ // InitialCorruptCheck is true to check data corruption on boot
+ // before serving any peer/client traffic.
+ InitialCorruptCheck bool
+ CorruptCheckTime time.Duration
+
+ // PreVote is true to enable Raft Pre-Vote.
+ PreVote bool
+
+ // Logger logs server-side operations.
+ // If not nil, it disables "capnslog" and uses the given logger.
+ Logger *zap.Logger
+
+ // LoggerConfig is server logger configuration for Raft logger.
+ // Must be either: "LoggerConfig != nil" or "LoggerCore != nil && LoggerWriteSyncer != nil".
+ LoggerConfig *zap.Config
+ // LoggerCore is "zapcore.Core" for raft logger.
+ // Must be either: "LoggerConfig != nil" or "LoggerCore != nil && LoggerWriteSyncer != nil".
+ LoggerCore zapcore.Core
+ LoggerWriteSyncer zapcore.WriteSyncer
+
+ Debug bool
+
+ ForceNewCluster bool
+
+	// EnableLeaseCheckpoint enables the primary lessor to persist lease remainingTTL,
+	// preventing indefinite auto-renewal of long-lived leases.
+	EnableLeaseCheckpoint bool
+	// LeaseCheckpointInterval is the wait duration between lease checkpoints.
+ LeaseCheckpointInterval time.Duration
+
+ EnableGRPCGateway bool
+}
+
+// VerifyBootstrap sanity-checks the initial config for the bootstrap case
+// and returns an error for things that should never happen.
+func (c *ServerConfig) VerifyBootstrap() error {
+ if err := c.hasLocalMember(); err != nil {
+ return err
+ }
+ if err := c.advertiseMatchesCluster(); err != nil {
+ return err
+ }
+ if checkDuplicateURL(c.InitialPeerURLsMap) {
+ return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
+ }
+ if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" {
+ return fmt.Errorf("initial cluster unset and no discovery URL found")
+ }
+ return nil
+}
+
+// VerifyJoinExisting sanity-checks the initial config for the join-existing-cluster
+// case and returns an error for things that should never happen.
+func (c *ServerConfig) VerifyJoinExisting() error {
+ // The member has announced its peer urls to the cluster before starting; no need to
+ // set the configuration again.
+ if err := c.hasLocalMember(); err != nil {
+ return err
+ }
+ if checkDuplicateURL(c.InitialPeerURLsMap) {
+ return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
+ }
+ if c.DiscoveryURL != "" {
+ return fmt.Errorf("discovery URL should not be set when joining existing initial cluster")
+ }
+ return nil
+}
+
+// hasLocalMember checks that the cluster at least contains the local server.
+func (c *ServerConfig) hasLocalMember() error {
+ if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
+ return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
+ }
+ return nil
+}
+
+// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
+func (c *ServerConfig) advertiseMatchesCluster() error {
+ urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
+ urls.Sort()
+ sort.Strings(apurls)
+ ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+ defer cancel()
+ ok, err := netutil.URLStringsEqual(ctx, c.Logger, apurls, urls.StringSlice())
+ if ok {
+ return nil
+ }
+
+ initMap, apMap := make(map[string]struct{}), make(map[string]struct{})
+ for _, url := range c.PeerURLs {
+ apMap[url.String()] = struct{}{}
+ }
+ for _, url := range c.InitialPeerURLsMap[c.Name] {
+ initMap[url.String()] = struct{}{}
+ }
+
+ missing := []string{}
+ for url := range initMap {
+ if _, ok := apMap[url]; !ok {
+ missing = append(missing, url)
+ }
+ }
+ if len(missing) > 0 {
+ for i := range missing {
+ missing[i] = c.Name + "=" + missing[i]
+ }
+ mstr := strings.Join(missing, ",")
+ apStr := strings.Join(apurls, ",")
+ return fmt.Errorf("--initial-cluster has %s but missing from --initial-advertise-peer-urls=%s (%v)", mstr, apStr, err)
+ }
+
+ for url := range apMap {
+ if _, ok := initMap[url]; !ok {
+ missing = append(missing, url)
+ }
+ }
+ if len(missing) > 0 {
+ mstr := strings.Join(missing, ",")
+ umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
+ return fmt.Errorf("--initial-advertise-peer-urls has %s but missing from --initial-cluster=%s", mstr, umap.String())
+ }
+
+ // resolved URLs from "--initial-advertise-peer-urls" and "--initial-cluster" did not match or failed
+ apStr := strings.Join(apurls, ",")
+ umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
+ return fmt.Errorf("failed to resolve %s to match --initial-cluster=%s (%v)", apStr, umap.String(), err)
+}
+
+func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }
+
+func (c *ServerConfig) WALDir() string {
+ if c.DedicatedWALDir != "" {
+ return c.DedicatedWALDir
+ }
+ return filepath.Join(c.MemberDir(), "wal")
+}
+
+func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }
+
+func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
+
+// ReqTimeout returns timeout for request to finish.
+func (c *ServerConfig) ReqTimeout() time.Duration {
+ // 5s for queue waiting, computation and disk IO delay
+ // + 2 * election timeout for possible leader election
+ return 5*time.Second + 2*time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
+}
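+
+// Worked example (illustrative): with TickMs=100 and ElectionTicks=10, the
+// election timeout is 1s, so ReqTimeout returns 5s + 2*1s = 7s.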
+
+func (c *ServerConfig) electionTimeout() time.Duration {
+ return time.Duration(c.ElectionTicks*int(c.TickMs)) * time.Millisecond
+}
+
+func (c *ServerConfig) peerDialTimeout() time.Duration {
+ // 1s for queue wait and election timeout
+ return time.Second + time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
+}
+
+func checkDuplicateURL(urlsmap types.URLsMap) bool {
+ um := make(map[string]bool)
+ for _, urls := range urlsmap {
+ for _, url := range urls {
+ u := url.String()
+ if um[u] {
+ return true
+ }
+ um[u] = true
+ }
+ }
+ return false
+}
+
+func (c *ServerConfig) bootstrapTimeout() time.Duration {
+ if c.BootstrapTimeout != 0 {
+ return c.BootstrapTimeout
+ }
+ return time.Second
+}
+
+func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") }
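+
+// Resulting layout (illustrative, DataDir="/var/lib/etcd", no dedicated WAL
+// dir): MemberDir is /var/lib/etcd/member, WALDir is
+// /var/lib/etcd/member/wal, SnapDir is /var/lib/etcd/member/snap, and the
+// backend database lives at /var/lib/etcd/member/snap/db.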
diff --git a/vendor/go.etcd.io/etcd/etcdserver/consistent_index.go b/vendor/go.etcd.io/etcd/etcdserver/consistent_index.go
new file mode 100644
index 000000000000..d513f6708d33
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/consistent_index.go
@@ -0,0 +1,33 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "sync/atomic"
+)
+
+// consistentIndex represents the offset of an entry in a consistent replica log.
+// It implements the mvcc.ConsistentIndexGetter interface.
+// It is always set to the offset of current entry before executing the entry,
+// so ConsistentWatchableKV could get the consistent index from it.
+type consistentIndex uint64
+
+func (i *consistentIndex) setConsistentIndex(v uint64) {
+ atomic.StoreUint64((*uint64)(i), v)
+}
+
+func (i *consistentIndex) ConsistentIndex() uint64 {
+ return atomic.LoadUint64((*uint64)(i))
+}
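+
+// Minimal usage sketch (illustrative): the applier stores the index of the
+// entry it is about to execute, and readers observe it atomically:
+//
+//	var ci consistentIndex
+//	ci.setConsistentIndex(42)
+//	_ = ci.ConsistentIndex() // 42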
diff --git a/vendor/go.etcd.io/etcd/etcdserver/corrupt.go b/vendor/go.etcd.io/etcd/etcdserver/corrupt.go
new file mode 100644
index 000000000000..e243d98ba6d2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/corrupt.go
@@ -0,0 +1,521 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/mvcc"
+ "go.etcd.io/etcd/pkg/traceutil"
+ "go.etcd.io/etcd/pkg/types"
+
+ "go.uber.org/zap"
+)
+
+// CheckInitialHashKV compares initial hash values with those of its peers
+// before serving any peer/client traffic. It reports a mismatch only when
+// hashes differ at the requested revision with the same compact revision.
+func (s *EtcdServer) CheckInitialHashKV() error {
+ if !s.Cfg.InitialCorruptCheck {
+ return nil
+ }
+
+ lg := s.getLogger()
+
+ if lg != nil {
+ lg.Info(
+ "starting initial corruption check",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Duration("timeout", s.Cfg.ReqTimeout()),
+ )
+ } else {
+ plog.Infof("%s starting initial corruption check with timeout %v...", s.ID(), s.Cfg.ReqTimeout())
+ }
+
+ h, rev, crev, err := s.kv.HashByRev(0)
+ if err != nil {
+ return fmt.Errorf("%s failed to fetch hash (%v)", s.ID(), err)
+ }
+ peers := s.getPeerHashKVs(rev)
+ mismatch := 0
+ for _, p := range peers {
+ if p.resp != nil {
+ peerID := types.ID(p.resp.Header.MemberId)
+ fields := []zap.Field{
+ zap.String("local-member-id", s.ID().String()),
+ zap.Int64("local-member-revision", rev),
+ zap.Int64("local-member-compact-revision", crev),
+ zap.Uint32("local-member-hash", h),
+ zap.String("remote-peer-id", peerID.String()),
+ zap.Strings("remote-peer-endpoints", p.eps),
+ zap.Int64("remote-peer-revision", p.resp.Header.Revision),
+ zap.Int64("remote-peer-compact-revision", p.resp.CompactRevision),
+ zap.Uint32("remote-peer-hash", p.resp.Hash),
+ }
+
+ if h != p.resp.Hash {
+ if crev == p.resp.CompactRevision {
+ if lg != nil {
+ lg.Warn("found different hash values from remote peer", fields...)
+ } else {
+ plog.Errorf("%s's hash %d != %s's hash %d (revision %d, peer revision %d, compact revision %d)", s.ID(), h, peerID, p.resp.Hash, rev, p.resp.Header.Revision, crev)
+ }
+ mismatch++
+ } else {
+ if lg != nil {
+ lg.Warn("found different compact revision values from remote peer", fields...)
+ } else {
+ plog.Warningf("%s cannot check hash of peer(%s): peer has a different compact revision %d (revision:%d)", s.ID(), peerID, p.resp.CompactRevision, rev)
+ }
+ }
+ }
+
+ continue
+ }
+
+ if p.err != nil {
+ switch p.err {
+ case rpctypes.ErrFutureRev:
+ if lg != nil {
+ lg.Warn(
+ "cannot fetch hash from slow remote peer",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Int64("local-member-revision", rev),
+ zap.Int64("local-member-compact-revision", crev),
+ zap.Uint32("local-member-hash", h),
+ zap.String("remote-peer-id", p.id.String()),
+ zap.Strings("remote-peer-endpoints", p.eps),
+					zap.Error(p.err),
+ )
+ } else {
+ plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: peer is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
+ }
+ case rpctypes.ErrCompacted:
+ if lg != nil {
+ lg.Warn(
+ "cannot fetch hash from remote peer; local member is behind",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Int64("local-member-revision", rev),
+ zap.Int64("local-member-compact-revision", crev),
+ zap.Uint32("local-member-hash", h),
+ zap.String("remote-peer-id", p.id.String()),
+ zap.Strings("remote-peer-endpoints", p.eps),
+					zap.Error(p.err),
+ )
+ } else {
+ plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: local node is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
+ }
+ }
+ }
+ }
+ if mismatch > 0 {
+ return fmt.Errorf("%s found data inconsistency with peers", s.ID())
+ }
+
+ if lg != nil {
+ lg.Info(
+ "initial corruption checking passed; no corruption",
+ zap.String("local-member-id", s.ID().String()),
+ )
+ } else {
+ plog.Infof("%s succeeded on initial corruption checking: no corruption", s.ID())
+ }
+ return nil
+}
+
+func (s *EtcdServer) monitorKVHash() {
+ t := s.Cfg.CorruptCheckTime
+ if t == 0 {
+ return
+ }
+
+ lg := s.getLogger()
+ if lg != nil {
+ lg.Info(
+ "enabled corruption checking",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Duration("interval", t),
+ )
+ } else {
+ plog.Infof("enabled corruption checking with %s interval", t)
+ }
+
+ for {
+ select {
+ case <-s.stopping:
+ return
+ case <-time.After(t):
+ }
+ if !s.isLeader() {
+ continue
+ }
+ if err := s.checkHashKV(); err != nil {
+ if lg != nil {
+ lg.Warn("failed to check hash KV", zap.Error(err))
+ } else {
+ plog.Debugf("check hash kv failed %v", err)
+ }
+ }
+ }
+}
+
+func (s *EtcdServer) checkHashKV() error {
+ lg := s.getLogger()
+
+ h, rev, crev, err := s.kv.HashByRev(0)
+ if err != nil {
+ return err
+ }
+ peers := s.getPeerHashKVs(rev)
+
+ ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+ err = s.linearizableReadNotify(ctx)
+ cancel()
+ if err != nil {
+ return err
+ }
+
+ h2, rev2, crev2, err := s.kv.HashByRev(0)
+ if err != nil {
+ return err
+ }
+
+ alarmed := false
+ mismatch := func(id uint64) {
+ if alarmed {
+ return
+ }
+ alarmed = true
+ a := &pb.AlarmRequest{
+ MemberID: id,
+ Action: pb.AlarmRequest_ACTIVATE,
+ Alarm: pb.AlarmType_CORRUPT,
+ }
+ s.goAttach(func() {
+ s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
+ })
+ }
+
+ if h2 != h && rev2 == rev && crev == crev2 {
+ if lg != nil {
+ lg.Warn(
+ "found hash mismatch",
+ zap.Int64("revision-1", rev),
+ zap.Int64("compact-revision-1", crev),
+ zap.Uint32("hash-1", h),
+ zap.Int64("revision-2", rev2),
+ zap.Int64("compact-revision-2", crev2),
+ zap.Uint32("hash-2", h2),
+ )
+ } else {
+ plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev)
+ }
+ mismatch(uint64(s.ID()))
+ }
+
+ checkedCount := 0
+ for _, p := range peers {
+ if p.resp == nil {
+ continue
+ }
+ checkedCount++
+ id := p.resp.Header.MemberId
+
+		// the leader expects the follower's latest revision to be less than or equal to its own
+ if p.resp.Header.Revision > rev2 {
+ if lg != nil {
+ lg.Warn(
+ "revision from follower must be less than or equal to leader's",
+ zap.Int64("leader-revision", rev2),
+ zap.Int64("follower-revision", p.resp.Header.Revision),
+ zap.String("follower-peer-id", types.ID(id).String()),
+ )
+ } else {
+ plog.Warningf(
+ "revision %d from member %v, expected at most %d",
+ p.resp.Header.Revision,
+ types.ID(id),
+ rev2)
+ }
+ mismatch(id)
+ }
+
+		// the leader expects the follower's latest compact revision to be less than or equal to its own
+ if p.resp.CompactRevision > crev2 {
+ if lg != nil {
+ lg.Warn(
+ "compact revision from follower must be less than or equal to leader's",
+ zap.Int64("leader-compact-revision", crev2),
+ zap.Int64("follower-compact-revision", p.resp.CompactRevision),
+ zap.String("follower-peer-id", types.ID(id).String()),
+ )
+ } else {
+ plog.Warningf(
+ "compact revision %d from member %v, expected at most %d",
+ p.resp.CompactRevision,
+ types.ID(id),
+ crev2,
+ )
+ }
+ mismatch(id)
+ }
+
+		// if the follower's compact revision equals the leader's earlier one, the hashes must match
+ if p.resp.CompactRevision == crev && p.resp.Hash != h {
+ if lg != nil {
+ lg.Warn(
+ "same compact revision then hashes must match",
+ zap.Int64("leader-compact-revision", crev2),
+ zap.Uint32("leader-hash", h),
+ zap.Int64("follower-compact-revision", p.resp.CompactRevision),
+ zap.Uint32("follower-hash", p.resp.Hash),
+ zap.String("follower-peer-id", types.ID(id).String()),
+ )
+ } else {
+ plog.Warningf(
+ "hash %d at revision %d from member %v, expected hash %d",
+ p.resp.Hash,
+ rev,
+ types.ID(id),
+ h,
+ )
+ }
+ mismatch(id)
+ }
+ }
+ if lg != nil {
+ lg.Info("finished peer corruption check", zap.Int("number-of-peers-checked", checkedCount))
+ } else {
+ plog.Infof("finished peer corruption check")
+ }
+
+ return nil
+}
+
+type peerInfo struct {
+ id types.ID
+ eps []string
+}
+
+type peerHashKVResp struct {
+ peerInfo
+ resp *pb.HashKVResponse
+ err error
+}
+
+func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp {
+ // TODO: handle the case when "s.cluster.Members" have not
+ // been populated (e.g. no snapshot to load from disk)
+ members := s.cluster.Members()
+ peers := make([]peerInfo, 0, len(members))
+ for _, m := range members {
+ if m.ID == s.ID() {
+ continue
+ }
+ peers = append(peers, peerInfo{id: m.ID, eps: m.PeerURLs})
+ }
+
+ lg := s.getLogger()
+
+ var resps []*peerHashKVResp
+ for _, p := range peers {
+ if len(p.eps) == 0 {
+ continue
+ }
+
+ respsLen := len(resps)
+ var lastErr error
+ for _, ep := range p.eps {
+ ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+
+ var resp *pb.HashKVResponse
+ resp, lastErr = s.getPeerHashKVHTTP(ctx, ep, rev)
+ cancel()
+ if lastErr == nil {
+ resps = append(resps, &peerHashKVResp{peerInfo: p, resp: resp, err: nil})
+ break
+ }
+ if lg != nil {
+ lg.Warn(
+ "failed hash kv request",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Int64("requested-revision", rev),
+ zap.String("remote-peer-endpoint", ep),
+ zap.Error(lastErr),
+ )
+ } else {
+ plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), lastErr.Error(), ep, rev)
+ }
+ }
+
+ // failed to get hashKV from all endpoints of this peer
+ if respsLen == len(resps) {
+ resps = append(resps, &peerHashKVResp{peerInfo: p, resp: nil, err: lastErr})
+ }
+ }
+ return resps
+}
+
+type applierV3Corrupt struct {
+ applierV3
+}
+
+func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} }
+
+func (a *applierV3Corrupt) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ return nil, nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Range(ctx context.Context, txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) {
+ return nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ return nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
+ return nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
+ return nil, nil, nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ return nil, ErrCorrupt
+}
+
+func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ return nil, ErrCorrupt
+}
+
+type ServerPeerV2 interface {
+ ServerPeer
+ HashKVHandler() http.Handler
+}
+
+const PeerHashKVPath = "/members/hashkv"
+
+type hashKVHandler struct {
+ lg *zap.Logger
+ server *EtcdServer
+}
+
+func (s *EtcdServer) HashKVHandler() http.Handler {
+ return &hashKVHandler{lg: s.getLogger(), server: s}
+}
+
+func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ w.Header().Set("Allow", http.MethodGet)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+ if r.URL.Path != PeerHashKVPath {
+ http.Error(w, "bad path", http.StatusBadRequest)
+ return
+ }
+
+ defer r.Body.Close()
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, "error reading body", http.StatusBadRequest)
+ return
+ }
+
+ req := &pb.HashKVRequest{}
+ if err = json.Unmarshal(b, req); err != nil {
+ h.lg.Warn("failed to unmarshal request", zap.Error(err))
+ http.Error(w, "error unmarshalling request", http.StatusBadRequest)
+ return
+ }
+ hash, rev, compactRev, err := h.server.KV().HashByRev(req.Revision)
+ if err != nil {
+ h.lg.Warn(
+ "failed to get hashKV",
+ zap.Int64("requested-revision", req.Revision),
+ zap.Error(err),
+ )
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: hash, CompactRevision: compactRev}
+ respBytes, err := json.Marshal(resp)
+ if err != nil {
+ h.lg.Warn("failed to marshal hashKV response", zap.Error(err))
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", h.server.Cluster().ID().String())
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(respBytes)
+}
+
+// getPeerHashKVHTTP fetches the hash of the KV store at the given rev via an HTTP call to the given URL.
+func (s *EtcdServer) getPeerHashKVHTTP(ctx context.Context, url string, rev int64) (*pb.HashKVResponse, error) {
+ cc := &http.Client{Transport: s.peerRt}
+ hashReq := &pb.HashKVRequest{Revision: rev}
+ hashReqBytes, err := json.Marshal(hashReq)
+ if err != nil {
+ return nil, err
+ }
+ requestUrl := url + PeerHashKVPath
+ req, err := http.NewRequest(http.MethodGet, requestUrl, bytes.NewReader(hashReqBytes))
+ if err != nil {
+ return nil, err
+ }
+ req = req.WithContext(ctx)
+ req.Header.Set("Content-Type", "application/json")
+ req.Cancel = ctx.Done()
+
+ resp, err := cc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ b, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode == http.StatusBadRequest {
+ if strings.Contains(string(b), mvcc.ErrCompacted.Error()) {
+ return nil, rpctypes.ErrCompacted
+ }
+ if strings.Contains(string(b), mvcc.ErrFutureRev.Error()) {
+ return nil, rpctypes.ErrFutureRev
+ }
+ }
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("unknown error: %s", string(b))
+ }
+
+ hashResp := &pb.HashKVResponse{}
+ if err := json.Unmarshal(b, hashResp); err != nil {
+ return nil, err
+ }
+ return hashResp, nil
+}
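+
+// Wire-format sketch (illustrative): the exchange above is JSON over the peer
+// transport, roughly
+//
+//	GET {peer-url}/members/hashkv
+//	{"revision": 42}
+//
+// answered with a JSON-encoded HashKVResponse carrying the hash, current
+// revision, and compact revision.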
diff --git a/vendor/go.etcd.io/etcd/etcdserver/doc.go b/vendor/go.etcd.io/etcd/etcdserver/doc.go
new file mode 100644
index 000000000000..b195d2d167a6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package etcdserver defines how etcd servers interact and store their states.
+package etcdserver
diff --git a/vendor/go.etcd.io/etcd/etcdserver/errors.go b/vendor/go.etcd.io/etcd/etcdserver/errors.go
new file mode 100644
index 000000000000..d0fe28970d12
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/errors.go
@@ -0,0 +1,51 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "errors"
+ "fmt"
+)
+
+var (
+ ErrUnknownMethod = errors.New("etcdserver: unknown method")
+ ErrStopped = errors.New("etcdserver: server stopped")
+ ErrCanceled = errors.New("etcdserver: request cancelled")
+ ErrTimeout = errors.New("etcdserver: request timed out")
+ ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
+ ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
+ ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long")
+ ErrLeaderChanged = errors.New("etcdserver: leader changed")
+ ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")
+ ErrLearnerNotReady = errors.New("etcdserver: can only promote a learner member which is in sync with leader")
+ ErrNoLeader = errors.New("etcdserver: no leader")
+ ErrNotLeader = errors.New("etcdserver: not leader")
+ ErrRequestTooLarge = errors.New("etcdserver: request is too large")
+ ErrNoSpace = errors.New("etcdserver: no space")
+ ErrTooManyRequests = errors.New("etcdserver: too many requests")
+ ErrUnhealthy = errors.New("etcdserver: unhealthy cluster")
+ ErrKeyNotFound = errors.New("etcdserver: key not found")
+ ErrCorrupt = errors.New("etcdserver: corrupt cluster")
+ ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee")
+)
+
+type DiscoveryError struct {
+ Op string
+ Err error
+}
+
+func (e DiscoveryError) Error() string {
+ return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err)
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go
new file mode 100644
index 000000000000..904c32187fb3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go
@@ -0,0 +1,2318 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: etcdserver/etcdserverpb/rpc.proto
+
+/*
+Package etcdserverpb is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package gw
+
+import (
+ "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+
+func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.RangeRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.PutRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.DeleteRangeRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.TxnRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.CompactionRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, error) {
+ var metadata runtime.ServerMetadata
+ stream, err := client.Watch(ctx)
+ if err != nil {
+ grpclog.Printf("Failed to start streaming: %v", err)
+ return nil, metadata, err
+ }
+ dec := marshaler.NewDecoder(req.Body)
+ handleSend := func() error {
+ var protoReq etcdserverpb.WatchRequest
+ err := dec.Decode(&protoReq)
+ if err == io.EOF {
+ return err
+ }
+ if err != nil {
+ grpclog.Printf("Failed to decode request: %v", err)
+ return err
+ }
+ if err := stream.Send(&protoReq); err != nil {
+ grpclog.Printf("Failed to send request: %v", err)
+ return err
+ }
+ return nil
+ }
+ if err := handleSend(); err != nil {
+ if cerr := stream.CloseSend(); cerr != nil {
+ grpclog.Printf("Failed to terminate client stream: %v", cerr)
+ }
+ if err == io.EOF {
+ return stream, metadata, nil
+ }
+ return nil, metadata, err
+ }
+ go func() {
+ for {
+ if err := handleSend(); err != nil {
+ break
+ }
+ }
+ if err := stream.CloseSend(); err != nil {
+ grpclog.Printf("Failed to terminate client stream: %v", err)
+ }
+ }()
+ header, err := stream.Header()
+ if err != nil {
+ grpclog.Printf("Failed to get header from client: %v", err)
+ return nil, metadata, err
+ }
+ metadata.HeaderMD = header
+ return stream, metadata, nil
+}
+
+func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.LeaseGrantRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.LeaseRevokeRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Lease_LeaseRevoke_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.LeaseRevokeRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) {
+ var metadata runtime.ServerMetadata
+ stream, err := client.LeaseKeepAlive(ctx)
+ if err != nil {
+ grpclog.Printf("Failed to start streaming: %v", err)
+ return nil, metadata, err
+ }
+ dec := marshaler.NewDecoder(req.Body)
+ handleSend := func() error {
+ var protoReq etcdserverpb.LeaseKeepAliveRequest
+ err := dec.Decode(&protoReq)
+ if err == io.EOF {
+ return err
+ }
+ if err != nil {
+ grpclog.Printf("Failed to decode request: %v", err)
+ return err
+ }
+ if err := stream.Send(&protoReq); err != nil {
+ grpclog.Printf("Failed to send request: %v", err)
+ return err
+ }
+ return nil
+ }
+ if err := handleSend(); err != nil {
+ if cerr := stream.CloseSend(); cerr != nil {
+ grpclog.Printf("Failed to terminate client stream: %v", cerr)
+ }
+ if err == io.EOF {
+ return stream, metadata, nil
+ }
+ return nil, metadata, err
+ }
+ go func() {
+ for {
+ if err := handleSend(); err != nil {
+ break
+ }
+ }
+ if err := stream.CloseSend(); err != nil {
+ grpclog.Printf("Failed to terminate client stream: %v", err)
+ }
+ }()
+ header, err := stream.Header()
+ if err != nil {
+ grpclog.Printf("Failed to get header from client: %v", err)
+ return nil, metadata, err
+ }
+ metadata.HeaderMD = header
+ return stream, metadata, nil
+}
+
+func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.LeaseTimeToLiveRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Lease_LeaseTimeToLive_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.LeaseTimeToLiveRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.LeaseLeasesRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Lease_LeaseLeases_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.LeaseLeasesRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.MemberAddRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.MemberRemoveRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.MemberUpdateRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.MemberListRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Cluster_MemberPromote_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.MemberPromoteRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.MemberPromote(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AlarmRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.StatusRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.DefragmentRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.HashRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.HashKVRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.HashKV(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Maintenance_SnapshotClient, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.SnapshotRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ stream, err := client.Snapshot(ctx, &protoReq)
+ if err != nil {
+ return nil, metadata, err
+ }
+ header, err := stream.Header()
+ if err != nil {
+ return nil, metadata, err
+ }
+ metadata.HeaderMD = header
+ return stream, metadata, nil
+
+}
+
+func request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.MoveLeaderRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.MoveLeader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthEnableRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthDisableRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthenticateRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthUserAddRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthUserGetRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthUserListRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthUserDeleteRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthUserChangePasswordRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthUserGrantRoleRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthUserRevokeRoleRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthRoleAddRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthRoleGetRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthRoleListRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthRoleDeleteRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthRoleGrantPermissionRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq etcdserverpb.AuthRoleRevokePermissionRequest
+ var metadata runtime.ServerMetadata
+
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+// RegisterKVHandlerFromEndpoint is same as RegisterKVHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterKVHandler(ctx, mux, conn)
+}
+
+// RegisterKVHandler registers the http handlers for service KV to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterKVHandlerClient(ctx, mux, etcdserverpb.NewKVClient(conn))
+}
+
+// RegisterKVHandler registers the http handlers for service KV to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "KVClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "KVClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "KVClient" to call the correct interceptors.
+func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error {
+
+ mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "range"}, ""))
+
+ pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "put"}, ""))
+
+ pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "deleterange"}, ""))
+
+ pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "txn"}, ""))
+
+ pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "compaction"}, ""))
+)
+
+var (
+ forward_KV_Range_0 = runtime.ForwardResponseMessage
+
+ forward_KV_Put_0 = runtime.ForwardResponseMessage
+
+ forward_KV_DeleteRange_0 = runtime.ForwardResponseMessage
+
+ forward_KV_Txn_0 = runtime.ForwardResponseMessage
+
+ forward_KV_Compact_0 = runtime.ForwardResponseMessage
+)
+
+// RegisterWatchHandlerFromEndpoint is same as RegisterWatchHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterWatchHandler(ctx, mux, conn)
+}
+
+// RegisterWatchHandler registers the http handlers for service Watch to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterWatchHandlerClient(ctx, mux, etcdserverpb.NewWatchClient(conn))
+}
+
+// RegisterWatchHandler registers the http handlers for service Watch to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "WatchClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WatchClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "WatchClient" to call the correct interceptors.
+func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.WatchClient) error {
+
+ mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Watch_Watch_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Watch_Watch_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3", "watch"}, ""))
+)
+
+var (
+ forward_Watch_Watch_0 = runtime.ForwardResponseStream
+)
+
+// RegisterLeaseHandlerFromEndpoint is same as RegisterLeaseHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterLeaseHandler(ctx, mux, conn)
+}
+
+// RegisterLeaseHandler registers the http handlers for service Lease to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterLeaseHandlerClient(ctx, mux, etcdserverpb.NewLeaseClient(conn))
+}
+
+// RegisterLeaseHandler registers the http handlers for service Lease to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "LeaseClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LeaseClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "LeaseClient" to call the correct interceptors.
+func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error {
+
+ mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Lease_LeaseRevoke_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseRevoke_1(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Lease_LeaseRevoke_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Lease_LeaseKeepAlive_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Lease_LeaseTimeToLive_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseTimeToLive_1(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Lease_LeaseTimeToLive_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseLeases_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Lease_LeaseLeases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Lease_LeaseLeases_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseLeases_1(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Lease_LeaseLeases_1(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "grant"}, ""))
+
+ pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "revoke"}, ""))
+
+ pattern_Lease_LeaseRevoke_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "revoke"}, ""))
+
+ pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "keepalive"}, ""))
+
+ pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "timetolive"}, ""))
+
+ pattern_Lease_LeaseTimeToLive_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "timetolive"}, ""))
+
+ pattern_Lease_LeaseLeases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "leases"}, ""))
+
+ pattern_Lease_LeaseLeases_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "leases"}, ""))
+)
+
+var (
+ forward_Lease_LeaseGrant_0 = runtime.ForwardResponseMessage
+
+ forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage
+
+ forward_Lease_LeaseRevoke_1 = runtime.ForwardResponseMessage
+
+ forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream
+
+ forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage
+
+ forward_Lease_LeaseTimeToLive_1 = runtime.ForwardResponseMessage
+
+ forward_Lease_LeaseLeases_0 = runtime.ForwardResponseMessage
+
+ forward_Lease_LeaseLeases_1 = runtime.ForwardResponseMessage
+)
+
+// RegisterClusterHandlerFromEndpoint is same as RegisterClusterHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterClusterHandler(ctx, mux, conn)
+}
+
+// RegisterClusterHandler registers the http handlers for service Cluster to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterClusterHandlerClient(ctx, mux, etcdserverpb.NewClusterClient(conn))
+}
+
+// RegisterClusterHandler registers the http handlers for service Cluster to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "ClusterClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ClusterClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "ClusterClient" to call the correct interceptors.
+func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.ClusterClient) error {
+
+ mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Cluster_MemberAdd_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Cluster_MemberRemove_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Cluster_MemberList_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Cluster_MemberPromote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Cluster_MemberPromote_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Cluster_MemberPromote_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "add"}, ""))
+
+ pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "remove"}, ""))
+
+ pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "update"}, ""))
+
+ pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "list"}, ""))
+
+ pattern_Cluster_MemberPromote_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "promote"}, ""))
+)
+
+var (
+ forward_Cluster_MemberAdd_0 = runtime.ForwardResponseMessage
+
+ forward_Cluster_MemberRemove_0 = runtime.ForwardResponseMessage
+
+ forward_Cluster_MemberUpdate_0 = runtime.ForwardResponseMessage
+
+ forward_Cluster_MemberList_0 = runtime.ForwardResponseMessage
+
+ forward_Cluster_MemberPromote_0 = runtime.ForwardResponseMessage
+)
+
+// RegisterMaintenanceHandlerFromEndpoint is same as RegisterMaintenanceHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterMaintenanceHandler(ctx, mux, conn)
+}
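+
+// Illustrative usage (a sketch, not part of the generated API; the endpoint
+// and listen address are assumptions): a caller could serve this gateway over
+// HTTP roughly as follows:
+//
+//	gwmux := runtime.NewServeMux()
+//	opts := []grpc.DialOption{grpc.WithInsecure()}
+//	if err := RegisterMaintenanceHandlerFromEndpoint(context.Background(), gwmux, "127.0.0.1:2379", opts); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(http.ListenAndServe(":8080", gwmux))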
+
+// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterMaintenanceHandlerClient(ctx, mux, etcdserverpb.NewMaintenanceClient(conn))
+}
+
+// RegisterMaintenanceHandlerClient registers the http handlers for service Maintenance to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "MaintenanceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MaintenanceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "MaintenanceClient" to call the correct interceptors.
+func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.MaintenanceClient) error {
+
+ mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_Alarm_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_Status_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_Defragment_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_Hash_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Maintenance_HashKV_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_HashKV_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Maintenance_HashKV_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Maintenance_Snapshot_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_MoveLeader_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Maintenance_MoveLeader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "alarm"}, ""))
+
+ pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "status"}, ""))
+
+ pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "defragment"}, ""))
+
+ pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hash"}, ""))
+
+ pattern_Maintenance_HashKV_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hash"}, ""))
+
+ pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "snapshot"}, ""))
+
+ pattern_Maintenance_MoveLeader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "transfer-leadership"}, ""))
+)
+
+var (
+ forward_Maintenance_Alarm_0 = runtime.ForwardResponseMessage
+
+ forward_Maintenance_Status_0 = runtime.ForwardResponseMessage
+
+ forward_Maintenance_Defragment_0 = runtime.ForwardResponseMessage
+
+ forward_Maintenance_Hash_0 = runtime.ForwardResponseMessage
+
+ forward_Maintenance_HashKV_0 = runtime.ForwardResponseMessage
+
+ forward_Maintenance_Snapshot_0 = runtime.ForwardResponseStream
+
+ forward_Maintenance_MoveLeader_0 = runtime.ForwardResponseMessage
+)
+
+// RegisterAuthHandlerFromEndpoint is the same as RegisterAuthHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterAuthHandler(ctx, mux, conn)
+}
+
+// RegisterAuthHandler registers the http handlers for service Auth to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterAuthHandlerClient(ctx, mux, etcdserverpb.NewAuthClient(conn))
+}
+
+// RegisterAuthHandlerClient registers the http handlers for service Auth to "mux".
+// The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "AuthClient" to call the correct interceptors.
+func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error {
+
+ mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ mux.Handle("POST", pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ if cn, ok := w.(http.CloseNotifier); ok {
+ go func(done <-chan struct{}, closed <-chan bool) {
+ select {
+ case <-done:
+ case <-closed:
+ cancel()
+ }
+ }(ctx.Done(), cn.CloseNotify())
+ }
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "enable"}, ""))
+
+ pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "disable"}, ""))
+
+ pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "authenticate"}, ""))
+
+ pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "add"}, ""))
+
+ pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "get"}, ""))
+
+ pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "list"}, ""))
+
+ pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "delete"}, ""))
+
+ pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "changepw"}, ""))
+
+ pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "grant"}, ""))
+
+ pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "revoke"}, ""))
+
+ pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "add"}, ""))
+
+ pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "get"}, ""))
+
+ pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "list"}, ""))
+
+ pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "delete"}, ""))
+
+ pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "grant"}, ""))
+
+ pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "revoke"}, ""))
+)
+
+var (
+ forward_Auth_AuthEnable_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_AuthDisable_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_Authenticate_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_UserAdd_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_UserGet_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_UserList_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_UserDelete_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_UserChangePassword_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_UserGrantRole_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_UserRevokeRole_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_RoleAdd_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_RoleGet_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_RoleList_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_RoleDelete_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_RoleGrantPermission_0 = runtime.ForwardResponseMessage
+
+ forward_Auth_RoleRevokePermission_0 = runtime.ForwardResponseMessage
+)
diff --git a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go
index 199ee6244d5e..6cbccc797c43 100644
--- a/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go
+++ b/vendor/go.etcd.io/etcd/etcdserver/etcdserverpb/rpc.pb.go
@@ -104,7 +104,9 @@ var RangeRequest_SortTarget_value = map[string]int32{
func (x RangeRequest_SortTarget) String() string {
return proto.EnumName(RangeRequest_SortTarget_name, int32(x))
}
-func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1, 1} }
+func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorRpc, []int{1, 1}
+}
type Compare_CompareResult int32
diff --git a/vendor/go.etcd.io/etcd/etcdserver/metrics.go b/vendor/go.etcd.io/etcd/etcdserver/metrics.go
new file mode 100644
index 000000000000..e0c0cde85538
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/metrics.go
@@ -0,0 +1,221 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ goruntime "runtime"
+ "time"
+
+ "go.etcd.io/etcd/pkg/runtime"
+ "go.etcd.io/etcd/version"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/zap"
+)
+
+var (
+ hasLeader = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "has_leader",
+ Help: "Whether or not a leader exists. 1 is existence, 0 is not.",
+ })
+ isLeader = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "is_leader",
+ Help: "Whether or not this member is a leader. 1 if is, 0 otherwise.",
+ })
+ leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "leader_changes_seen_total",
+ Help: "The number of leader changes seen.",
+ })
+ isLearner = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "is_learner",
+ Help: "Whether or not this member is a learner. 1 if is, 0 otherwise.",
+ })
+ learnerPromoteFailed = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "learner_promote_failures",
+ Help: "The total number of failed learner promotions (likely learner not ready) while this member is leader.",
+ },
+ []string{"Reason"},
+ )
+ learnerPromoteSucceed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "learner_promote_successes",
+ Help: "The total number of successful learner promotions while this member is leader.",
+ })
+ heartbeatSendFailures = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "heartbeat_send_failures_total",
+ Help: "The total number of leader heartbeat send failures (likely overloaded from slow disk).",
+ })
+ slowApplies = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "slow_apply_total",
+ Help: "The total number of slow apply requests (likely overloaded from slow disk).",
+ })
+ applySnapshotInProgress = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "snapshot_apply_in_progress_total",
+ Help: "1 if the server is applying the incoming snapshot. 0 if none.",
+ })
+ proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "proposals_committed_total",
+ Help: "The total number of consensus proposals committed.",
+ })
+ proposalsApplied = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "proposals_applied_total",
+ Help: "The total number of consensus proposals applied.",
+ })
+ proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "proposals_pending",
+ Help: "The current number of pending proposals to commit.",
+ })
+ proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "proposals_failed_total",
+ Help: "The total number of failed proposals seen.",
+ })
+ slowReadIndex = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "slow_read_indexes_total",
+ Help: "The total number of pending read indexes not in sync with leader's or timed out read index requests.",
+ })
+ readIndexFailed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "read_indexes_failed_total",
+ Help: "The total number of failed read indexes seen.",
+ })
+ leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "server",
+ Name: "lease_expired_total",
+ Help: "The total number of expired leases.",
+ })
+ quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "quota_backend_bytes",
+ Help: "Current backend storage quota size in bytes.",
+ })
+ currentVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "version",
+ Help: "Which version is running. 1 for 'server_version' label with current version.",
+ },
+ []string{"server_version"})
+ currentGoVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "go_version",
+ Help: "Which Go version server is running with. 1 for 'server_go_version' label with current version.",
+ },
+ []string{"server_go_version"})
+ serverID = prometheus.NewGaugeVec(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "id",
+ Help: "Server or member ID in hexadecimal format. 1 for 'server_id' label with current ID.",
+ },
+ []string{"server_id"})
+)
+
+func init() {
+ prometheus.MustRegister(hasLeader)
+ prometheus.MustRegister(isLeader)
+ prometheus.MustRegister(leaderChanges)
+ prometheus.MustRegister(heartbeatSendFailures)
+ prometheus.MustRegister(slowApplies)
+ prometheus.MustRegister(applySnapshotInProgress)
+ prometheus.MustRegister(proposalsCommitted)
+ prometheus.MustRegister(proposalsApplied)
+ prometheus.MustRegister(proposalsPending)
+ prometheus.MustRegister(proposalsFailed)
+ prometheus.MustRegister(slowReadIndex)
+ prometheus.MustRegister(readIndexFailed)
+ prometheus.MustRegister(leaseExpired)
+ prometheus.MustRegister(quotaBackendBytes)
+ prometheus.MustRegister(currentVersion)
+ prometheus.MustRegister(currentGoVersion)
+ prometheus.MustRegister(serverID)
+ prometheus.MustRegister(isLearner)
+ prometheus.MustRegister(learnerPromoteSucceed)
+ prometheus.MustRegister(learnerPromoteFailed)
+
+ currentVersion.With(prometheus.Labels{
+ "server_version": version.Version,
+ }).Set(1)
+ currentGoVersion.With(prometheus.Labels{
+ "server_go_version": goruntime.Version(),
+ }).Set(1)
+}
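+
+// Explanatory note: with the Namespace/Subsystem/Name values above, a scrape
+// exposes series such as etcd_server_has_leader and
+// etcd_server_version{server_version="..."} 1.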
+
+func monitorFileDescriptor(lg *zap.Logger, done <-chan struct{}) {
+ ticker := time.NewTicker(5 * time.Second)
+ defer ticker.Stop()
+ for {
+ used, err := runtime.FDUsage()
+ if err != nil {
+ if lg != nil {
+ lg.Warn("failed to get file descriptor usage", zap.Error(err))
+ } else {
+ plog.Errorf("cannot monitor file descriptor usage (%v)", err)
+ }
+ return
+ }
+ limit, err := runtime.FDLimit()
+ if err != nil {
+ if lg != nil {
+ lg.Warn("failed to get file descriptor limit", zap.Error(err))
+ } else {
+ plog.Errorf("cannot monitor file descriptor usage (%v)", err)
+ }
+ return
+ }
+		if used >= limit/5*4 { // warn at or above 80% of the fd limit
+ if lg != nil {
+ lg.Warn("80% of file descriptors are used", zap.Uint64("used", used), zap.Uint64("limit", limit))
+ } else {
+ plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit)
+ }
+ }
+ select {
+ case <-ticker.C:
+ case <-done:
+ return
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/quota.go b/vendor/go.etcd.io/etcd/etcdserver/quota.go
new file mode 100644
index 000000000000..6d70430e73cb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/quota.go
@@ -0,0 +1,182 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "sync"
+
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+)
+
+const (
+ // DefaultQuotaBytes is the number of bytes the backend Size may
+ // consume before exceeding the space quota.
+ DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB
+ // MaxQuotaBytes is the maximum number of bytes suggested for a backend
+ // quota. A larger quota may lead to degraded performance.
+ MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB
+)
+
+// Quota represents an arbitrary quota against arbitrary requests. Each request
+// costs some charge; if there is not enough remaining charge, then there are
+// too few resources available within the quota to apply the request.
+type Quota interface {
+ // Available judges whether the given request fits within the quota.
+ Available(req interface{}) bool
+ // Cost computes the charge against the quota for a given request.
+ Cost(req interface{}) int
+ // Remaining is the amount of charge left for the quota.
+ Remaining() int64
+}
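+
+// Illustrative sketch (not upstream code): a caller holding a Quota could
+// gate a write before applying it, e.g.
+//
+//	if !q.Available(putReq) { // putReq is a *pb.PutRequest
+//		err = ErrNoSpace // hypothetical sentinel for "quota exceeded"
+//	}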
+
+type passthroughQuota struct{}
+
+func (*passthroughQuota) Available(interface{}) bool { return true }
+func (*passthroughQuota) Cost(interface{}) int { return 0 }
+func (*passthroughQuota) Remaining() int64 { return 1 }
+
+type backendQuota struct {
+ s *EtcdServer
+ maxBackendBytes int64
+}
+
+const (
+ // leaseOverhead is an estimate for the cost of storing a lease
+ leaseOverhead = 64
+ // kvOverhead is an estimate for the cost of storing a key's metadata
+ kvOverhead = 256
+)
+
+var (
+ // only log once
+ quotaLogOnce sync.Once
+
+ DefaultQuotaSize = humanize.Bytes(uint64(DefaultQuotaBytes))
+ maxQuotaSize = humanize.Bytes(uint64(MaxQuotaBytes))
+)
+
+// NewBackendQuota creates a quota layer with the given storage limit.
+func NewBackendQuota(s *EtcdServer, name string) Quota {
+ lg := s.getLogger()
+ quotaBackendBytes.Set(float64(s.Cfg.QuotaBackendBytes))
+
+ if s.Cfg.QuotaBackendBytes < 0 {
+ // disable quotas if negative
+ quotaLogOnce.Do(func() {
+ if lg != nil {
+ lg.Info(
+ "disabled backend quota",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
+ )
+ } else {
+ plog.Warningf("disabling backend quota")
+ }
+ })
+ return &passthroughQuota{}
+ }
+
+ if s.Cfg.QuotaBackendBytes == 0 {
+ // use default size if no quota size given
+ quotaLogOnce.Do(func() {
+ if lg != nil {
+ lg.Info(
+ "enabled backend quota with default value",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", DefaultQuotaBytes),
+ zap.String("quota-size", DefaultQuotaSize),
+ )
+ }
+ })
+ quotaBackendBytes.Set(float64(DefaultQuotaBytes))
+ return &backendQuota{s, DefaultQuotaBytes}
+ }
+
+ quotaLogOnce.Do(func() {
+ if s.Cfg.QuotaBackendBytes > MaxQuotaBytes {
+ if lg != nil {
+ lg.Warn(
+ "quota exceeds the maximum value",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
+ zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
+ zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes),
+ zap.String("quota-maximum-size", maxQuotaSize),
+ )
+ } else {
+ plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes)
+ }
+ }
+ if lg != nil {
+ lg.Info(
+ "enabled backend quota",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
+ zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
+ )
+ }
+ })
+ return &backendQuota{s, s.Cfg.QuotaBackendBytes}
+}
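+
+// Behavior summary (explanatory): QuotaBackendBytes < 0 disables enforcement
+// via passthroughQuota, == 0 falls back to DefaultQuotaBytes (2GB), and
+// values above MaxQuotaBytes (8GB) are honored but logged as exceeding the
+// recommended maximum.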
+
+func (b *backendQuota) Available(v interface{}) bool {
+ // TODO: maybe optimize backend.Size()
+ return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes
+}
+
+func (b *backendQuota) Cost(v interface{}) int {
+ switch r := v.(type) {
+ case *pb.PutRequest:
+ return costPut(r)
+ case *pb.TxnRequest:
+ return costTxn(r)
+ case *pb.LeaseGrantRequest:
+ return leaseOverhead
+ default:
+ panic("unexpected cost")
+ }
+}
+
+func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) }
+
+func costTxnReq(u *pb.RequestOp) int {
+ r := u.GetRequestPut()
+ if r == nil {
+ return 0
+ }
+ return costPut(r)
+}
+
+func costTxn(r *pb.TxnRequest) int {
+ sizeSuccess := 0
+ for _, u := range r.Success {
+ sizeSuccess += costTxnReq(u)
+ }
+ sizeFailure := 0
+ for _, u := range r.Failure {
+ sizeFailure += costTxnReq(u)
+ }
+ if sizeFailure > sizeSuccess {
+ return sizeFailure
+ }
+ return sizeSuccess
+}
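+
+// Worked example (explanatory): a txn whose success branch puts a 10-byte key
+// with a 100-byte value and whose failure branch is empty costs
+// max(256+10+100, 0) = 366 against the quota; only one branch executes, so
+// the charge covers the worst case.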
+
+func (b *backendQuota) Remaining() int64 {
+ return b.maxBackendBytes - b.s.Backend().Size()
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/raft.go b/vendor/go.etcd.io/etcd/etcdserver/raft.go
new file mode 100644
index 000000000000..e7cf0729929f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/raft.go
@@ -0,0 +1,780 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "encoding/json"
+ "expvar"
+ "fmt"
+ "log"
+ "sort"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/membership"
+ "go.etcd.io/etcd/etcdserver/api/rafthttp"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/pkg/contention"
+ "go.etcd.io/etcd/pkg/logutil"
+ "go.etcd.io/etcd/pkg/pbutil"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/raft"
+ "go.etcd.io/etcd/raft/raftpb"
+ "go.etcd.io/etcd/wal"
+ "go.etcd.io/etcd/wal/walpb"
+ "go.uber.org/zap"
+)
+
+const (
+ // The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
+ // Assuming the RTT is around 10ms, 1MB max size is large enough.
+ maxSizePerMsg = 1 * 1024 * 1024
+ // Never overflow the rafthttp buffer, which is 4096.
+ // TODO: a better const?
+ maxInflightMsgs = 4096 / 8
+)
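+
+// Back-of-the-envelope check (explanatory): at 100MB/s with a ~10ms RTT, the
+// bandwidth-delay product is 100 MB/s * 0.01 s = 1 MB, matching maxSizePerMsg;
+// 4096/8 caps the pipeline at 512 in-flight messages.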
+
+var (
+ // protects raftStatus
+ raftStatusMu sync.Mutex
+	// indirection for the expvar func interface:
+	// expvar panics when publishing a duplicate name and does not support
+	// removing a registered name, so we register a single func that calls
+	// raftStatus and swap raftStatus as needed.
+ raftStatus func() raft.Status
+)
+
+func init() {
+ expvar.Publish("raft.status", expvar.Func(func() interface{} {
+ raftStatusMu.Lock()
+ defer raftStatusMu.Unlock()
+ return raftStatus()
+ }))
+}
+
+// apply contains entries and a snapshot to be applied. Once
+// an apply is consumed, the entries will be persisted to
+// raft storage concurrently; the application must read
+// raftDone before assuming the raft messages are stable.
+type apply struct {
+ entries []raftpb.Entry
+ snapshot raftpb.Snapshot
+ // notifyc synchronizes etcd server applies with the raft node
+ notifyc chan struct{}
+}
+
+type raftNode struct {
+ lg *zap.Logger
+
+ tickMu *sync.Mutex
+ raftNodeConfig
+
+ // a chan to send/receive snapshot
+ msgSnapC chan raftpb.Message
+
+ // a chan to send out apply
+ applyc chan apply
+
+ // a chan to send out readState
+ readStateC chan raft.ReadState
+
+ // utility
+ ticker *time.Ticker
+	// contention detector for raft heartbeat messages
+ td *contention.TimeoutDetector
+
+ stopped chan struct{}
+ done chan struct{}
+}
+
+type raftNodeConfig struct {
+ lg *zap.Logger
+
+ // to check if msg receiver is removed from cluster
+ isIDRemoved func(id uint64) bool
+ raft.Node
+ raftStorage *raft.MemoryStorage
+ storage Storage
+ heartbeat time.Duration // for logging
+ // transport specifies the transport to send and receive msgs to members.
+ // Sending messages MUST NOT block. It is okay to drop messages, since
+ // clients should timeout and reissue their messages.
+ // If transport is nil, server will panic.
+ transport rafthttp.Transporter
+}
+
+func newRaftNode(cfg raftNodeConfig) *raftNode {
+ var lg raft.Logger
+ if cfg.lg != nil {
+ lg = logutil.NewRaftLoggerZap(cfg.lg)
+ } else {
+ lcfg := logutil.DefaultZapLoggerConfig
+ var err error
+ lg, err = logutil.NewRaftLogger(&lcfg)
+ if err != nil {
+ log.Fatalf("cannot create raft logger %v", err)
+ }
+ }
+ raft.SetLogger(lg)
+ r := &raftNode{
+ lg: cfg.lg,
+ tickMu: new(sync.Mutex),
+ raftNodeConfig: cfg,
+		// set up a contention detector for raft heartbeat messages;
+		// we expect to send a heartbeat within two heartbeat intervals.
+ td: contention.NewTimeoutDetector(2 * cfg.heartbeat),
+ readStateC: make(chan raft.ReadState, 1),
+ msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
+ applyc: make(chan apply),
+ stopped: make(chan struct{}),
+ done: make(chan struct{}),
+ }
+ if r.heartbeat == 0 {
+ r.ticker = &time.Ticker{}
+ } else {
+ r.ticker = time.NewTicker(r.heartbeat)
+ }
+ return r
+}
+
+// raft.Node has no internal locking, so tick serializes calls to Tick.
+func (r *raftNode) tick() {
+ r.tickMu.Lock()
+ r.Tick()
+ r.tickMu.Unlock()
+}
+
+// start prepares and starts raftNode in a new goroutine. It is no longer safe
+// to modify the fields after it has been started.
+func (r *raftNode) start(rh *raftReadyHandler) {
+ internalTimeout := time.Second
+
+ go func() {
+ defer r.onStop()
+ islead := false
+
+ for {
+ select {
+ case <-r.ticker.C:
+ r.tick()
+ case rd := <-r.Ready():
+ if rd.SoftState != nil {
+ newLeader := rd.SoftState.Lead != raft.None && rh.getLead() != rd.SoftState.Lead
+ if newLeader {
+ leaderChanges.Inc()
+ }
+
+ if rd.SoftState.Lead == raft.None {
+ hasLeader.Set(0)
+ } else {
+ hasLeader.Set(1)
+ }
+
+ rh.updateLead(rd.SoftState.Lead)
+ islead = rd.RaftState == raft.StateLeader
+ if islead {
+ isLeader.Set(1)
+ } else {
+ isLeader.Set(0)
+ }
+ rh.updateLeadership(newLeader)
+ r.td.Reset()
+ }
+
+ if len(rd.ReadStates) != 0 {
+ select {
+ case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
+ case <-time.After(internalTimeout):
+ if r.lg != nil {
+ r.lg.Warn("timed out sending read state", zap.Duration("timeout", internalTimeout))
+ } else {
+ plog.Warningf("timed out sending read state")
+ }
+ case <-r.stopped:
+ return
+ }
+ }
+
+ notifyc := make(chan struct{}, 1)
+ ap := apply{
+ entries: rd.CommittedEntries,
+ snapshot: rd.Snapshot,
+ notifyc: notifyc,
+ }
+
+ updateCommittedIndex(&ap, rh)
+
+ select {
+ case r.applyc <- ap:
+ case <-r.stopped:
+ return
+ }
+
+				// The leader can write to its disk in parallel with replicating to the
+				// followers and the followers writing to their disks.
+				// For more details, see section 10.2.1 of the raft thesis.
+ if islead {
+ // gofail: var raftBeforeLeaderSend struct{}
+ r.transport.Send(r.processMessages(rd.Messages))
+ }
+
+ // Must save the snapshot file and WAL snapshot entry before saving any other entries or hardstate to
+ // ensure that recovery after a snapshot restore is possible.
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ // gofail: var raftBeforeSaveSnap struct{}
+ if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
+ if r.lg != nil {
+ r.lg.Fatal("failed to save Raft snapshot", zap.Error(err))
+ } else {
+ plog.Fatalf("failed to save Raft snapshot %v", err)
+ }
+ }
+ // gofail: var raftAfterSaveSnap struct{}
+ }
+
+ // gofail: var raftBeforeSave struct{}
+ if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
+ if r.lg != nil {
+ r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err))
+ } else {
+					plog.Fatalf("failed to save state and entries: %v", err)
+ }
+ }
+ if !raft.IsEmptyHardState(rd.HardState) {
+ proposalsCommitted.Set(float64(rd.HardState.Commit))
+ }
+ // gofail: var raftAfterSave struct{}
+
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ // Force WAL to fsync its hard state before Release() releases
+				// old data from the WAL. Otherwise we could get an error like:
+ // panic: tocommit(107) is out of range [lastIndex(84)]. Was the raft log corrupted, truncated, or lost?
+ // See https://github.com/etcd-io/etcd/issues/10219 for more details.
+ if err := r.storage.Sync(); err != nil {
+ if r.lg != nil {
+ r.lg.Fatal("failed to sync Raft snapshot", zap.Error(err))
+ } else {
+ plog.Fatalf("failed to sync Raft snapshot %v", err)
+ }
+ }
+
+				// etcdserver now claims the snapshot has been persisted onto the disk
+ notifyc <- struct{}{}
+
+ // gofail: var raftBeforeApplySnap struct{}
+ r.raftStorage.ApplySnapshot(rd.Snapshot)
+ if r.lg != nil {
+ r.lg.Info("applied incoming Raft snapshot", zap.Uint64("snapshot-index", rd.Snapshot.Metadata.Index))
+ } else {
+ plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
+ }
+ // gofail: var raftAfterApplySnap struct{}
+
+ if err := r.storage.Release(rd.Snapshot); err != nil {
+ if r.lg != nil {
+ r.lg.Fatal("failed to release Raft wal", zap.Error(err))
+ } else {
+ plog.Fatalf("failed to release Raft wal %v", err)
+ }
+ }
+ // gofail: var raftAfterWALRelease struct{}
+ }
+
+ r.raftStorage.Append(rd.Entries)
+
+ if !islead {
+ // finish processing incoming messages before we signal raftdone chan
+ msgs := r.processMessages(rd.Messages)
+
+ // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
+ notifyc <- struct{}{}
+
+				// Candidate or follower needs to wait for all pending configuration
+				// changes to be applied before sending messages.
+				// Otherwise we might incorrectly count votes (e.g. votes from removed members).
+				// Also, a slow machine's follower raft layer could proceed to become the
+				// leader of its own single-node cluster before the apply layer applies
+				// the config change.
+				// We simply wait for ALL pending entries to be applied for now.
+				// We might improve this later on if it causes unnecessarily long blocking.
+ waitApply := false
+ for _, ent := range rd.CommittedEntries {
+ if ent.Type == raftpb.EntryConfChange {
+ waitApply = true
+ break
+ }
+ }
+ if waitApply {
+ // blocks until 'applyAll' calls 'applyWait.Trigger'
+ // to be in sync with scheduled config-change job
+ // (assume notifyc has cap of 1)
+ select {
+ case notifyc <- struct{}{}:
+ case <-r.stopped:
+ return
+ }
+ }
+
+ // gofail: var raftBeforeFollowerSend struct{}
+ r.transport.Send(msgs)
+ } else {
+ // leader already processed 'MsgSnap' and signaled
+ notifyc <- struct{}{}
+ }
+
+ r.Advance()
+ case <-r.stopped:
+ return
+ }
+ }
+ }()
+}
+
+func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
+ var ci uint64
+ if len(ap.entries) != 0 {
+ ci = ap.entries[len(ap.entries)-1].Index
+ }
+ if ap.snapshot.Metadata.Index > ci {
+ ci = ap.snapshot.Metadata.Index
+ }
+ if ci != 0 {
+ rh.updateCommittedIndex(ci)
+ }
+}
+
+func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
+ sentAppResp := false
+ for i := len(ms) - 1; i >= 0; i-- {
+ if r.isIDRemoved(ms[i].To) {
+ ms[i].To = 0
+ }
+
+ if ms[i].Type == raftpb.MsgAppResp {
+ if sentAppResp {
+ ms[i].To = 0
+ } else {
+ sentAppResp = true
+ }
+ }
+
+ if ms[i].Type == raftpb.MsgSnap {
+			// There are two separate data stores: the store for v2, and the KV for v3.
+			// The msgSnap only contains the most recent snapshot of the store, without the KV.
+			// So we need to redirect the msgSnap to the etcd server main loop for merging in the
+			// current store snapshot and KV snapshot.
+ select {
+ case r.msgSnapC <- ms[i]:
+ default:
+				// drop msgSnap if the inflight chan is full.
+ }
+ ms[i].To = 0
+ }
+ if ms[i].Type == raftpb.MsgHeartbeat {
+ ok, exceed := r.td.Observe(ms[i].To)
+ if !ok {
+ // TODO: limit request rate.
+ if r.lg != nil {
+ r.lg.Warn(
+ "leader failed to send out heartbeat on time; took too long, leader is overloaded likely from slow disk",
+ zap.String("to", fmt.Sprintf("%x", ms[i].To)),
+ zap.Duration("heartbeat-interval", r.heartbeat),
+ zap.Duration("expected-duration", 2*r.heartbeat),
+ zap.Duration("exceeded-duration", exceed),
+ )
+ } else {
+ plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v, to %x)", r.heartbeat, exceed, ms[i].To)
+ plog.Warningf("server is likely overloaded")
+ }
+ heartbeatSendFailures.Inc()
+ }
+ }
+ }
+ return ms
+}
+
+func (r *raftNode) apply() chan apply {
+ return r.applyc
+}
+
+func (r *raftNode) stop() {
+ r.stopped <- struct{}{}
+ <-r.done
+}
+
+func (r *raftNode) onStop() {
+ r.Stop()
+ r.ticker.Stop()
+ r.transport.Stop()
+ if err := r.storage.Close(); err != nil {
+ if r.lg != nil {
+ r.lg.Panic("failed to close Raft storage", zap.Error(err))
+ } else {
+ plog.Panicf("raft close storage error: %v", err)
+ }
+ }
+ close(r.done)
+}
+
+// for testing
+func (r *raftNode) pauseSending() {
+ p := r.transport.(rafthttp.Pausable)
+ p.Pause()
+}
+
+func (r *raftNode) resumeSending() {
+ p := r.transport.(rafthttp.Pausable)
+ p.Resume()
+}
+
+// advanceTicks advances ticks of Raft node.
+// This can be used for fast-forwarding election
+// ticks in multi data-center deployments, thus
+// speeding up election process.
+func (r *raftNode) advanceTicks(ticks int) {
+ for i := 0; i < ticks; i++ {
+ r.tick()
+ }
+}
+
+func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
+ var err error
+ member := cl.MemberByName(cfg.Name)
+ metadata := pbutil.MustMarshal(
+ &pb.Metadata{
+ NodeID: uint64(member.ID),
+ ClusterID: uint64(cl.ID()),
+ },
+ )
+ if w, err = wal.Create(cfg.Logger, cfg.WALDir(), metadata); err != nil {
+ if cfg.Logger != nil {
+ cfg.Logger.Panic("failed to create WAL", zap.Error(err))
+ } else {
+ plog.Panicf("create wal error: %v", err)
+ }
+ }
+ peers := make([]raft.Peer, len(ids))
+ for i, id := range ids {
+ var ctx []byte
+ ctx, err = json.Marshal((*cl).Member(id))
+ if err != nil {
+ if cfg.Logger != nil {
+ cfg.Logger.Panic("failed to marshal member", zap.Error(err))
+ } else {
+ plog.Panicf("marshal member should never fail: %v", err)
+ }
+ }
+ peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
+ }
+ id = member.ID
+ if cfg.Logger != nil {
+ cfg.Logger.Info(
+ "starting local member",
+ zap.String("local-member-id", id.String()),
+ zap.String("cluster-id", cl.ID().String()),
+ )
+ } else {
+ plog.Infof("starting member %s in cluster %s", id, cl.ID())
+ }
+ s = raft.NewMemoryStorage()
+ c := &raft.Config{
+ ID: uint64(id),
+ ElectionTick: cfg.ElectionTicks,
+ HeartbeatTick: 1,
+ Storage: s,
+ MaxSizePerMsg: maxSizePerMsg,
+ MaxInflightMsgs: maxInflightMsgs,
+ CheckQuorum: true,
+ PreVote: cfg.PreVote,
+ }
+ if cfg.Logger != nil {
+ // called after capnslog setting in "init" function
+ if cfg.LoggerConfig != nil {
+ c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
+ if err != nil {
+ log.Fatalf("cannot create raft logger %v", err)
+ }
+ } else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil {
+ c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer)
+ }
+ }
+
+ if len(peers) == 0 {
+ n = raft.RestartNode(c)
+ } else {
+ n = raft.StartNode(c, peers)
+ }
+ raftStatusMu.Lock()
+ raftStatus = n.Status
+ raftStatusMu.Unlock()
+ return id, n, s, w
+}
+
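+// restartNode rebuilds a raft node from an existing WAL (and optional
+// snapshot), restoring the hard state and entries into memory storage
+// before restarting the node.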
+func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
+ var walsnap walpb.Snapshot
+ if snapshot != nil {
+ walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
+ }
+ w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)
+
+ if cfg.Logger != nil {
+ cfg.Logger.Info(
+ "restarting local member",
+ zap.String("cluster-id", cid.String()),
+ zap.String("local-member-id", id.String()),
+ zap.Uint64("commit-index", st.Commit),
+ )
+ } else {
+ plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
+ }
+ cl := membership.NewCluster(cfg.Logger, "")
+ cl.SetID(id, cid)
+ s := raft.NewMemoryStorage()
+ if snapshot != nil {
+ s.ApplySnapshot(*snapshot)
+ }
+ s.SetHardState(st)
+ s.Append(ents)
+ c := &raft.Config{
+ ID: uint64(id),
+ ElectionTick: cfg.ElectionTicks,
+ HeartbeatTick: 1,
+ Storage: s,
+ MaxSizePerMsg: maxSizePerMsg,
+ MaxInflightMsgs: maxInflightMsgs,
+ CheckQuorum: true,
+ PreVote: cfg.PreVote,
+ }
+ if cfg.Logger != nil {
+ // called after capnslog setting in "init" function
+ var err error
+ if cfg.LoggerConfig != nil {
+ c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
+ if err != nil {
+ log.Fatalf("cannot create raft logger %v", err)
+ }
+ } else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil {
+ c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer)
+ }
+ }
+
+ n := raft.RestartNode(c)
+ raftStatusMu.Lock()
+ raftStatus = n.Status
+ raftStatusMu.Unlock()
+ return id, cl, n, s, w
+}
+
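+// restartAsStandaloneNode restarts the member as a single-node cluster (used
+// when cfg.ForceNewCluster is set): it discards uncommitted WAL entries, then
+// force-appends config-change entries that remove every other member and, if
+// needed, re-add the local member.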
+func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
+ var walsnap walpb.Snapshot
+ if snapshot != nil {
+ walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
+ }
+ w, id, cid, st, ents := readWAL(cfg.Logger, cfg.WALDir(), walsnap)
+
+ // discard the previously uncommitted entries
+ for i, ent := range ents {
+ if ent.Index > st.Commit {
+ if cfg.Logger != nil {
+ cfg.Logger.Info(
+ "discarding uncommitted WAL entries",
+ zap.Uint64("entry-index", ent.Index),
+ zap.Uint64("commit-index-from-wal", st.Commit),
+ zap.Int("number-of-discarded-entries", len(ents)-i),
+ )
+ } else {
+ plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
+ }
+ ents = ents[:i]
+ break
+ }
+ }
+
+ // force append the configuration change entries
+ toAppEnts := createConfigChangeEnts(
+ cfg.Logger,
+ getIDs(cfg.Logger, snapshot, ents),
+ uint64(id),
+ st.Term,
+ st.Commit,
+ )
+ ents = append(ents, toAppEnts...)
+
+ // force commit newly appended entries
+ err := w.Save(raftpb.HardState{}, toAppEnts)
+ if err != nil {
+ if cfg.Logger != nil {
+ cfg.Logger.Fatal("failed to save hard state and entries", zap.Error(err))
+ } else {
+ plog.Fatalf("%v", err)
+ }
+ }
+ if len(ents) != 0 {
+ st.Commit = ents[len(ents)-1].Index
+ }
+
+ if cfg.Logger != nil {
+ cfg.Logger.Info(
+ "forcing restart member",
+ zap.String("cluster-id", cid.String()),
+ zap.String("local-member-id", id.String()),
+ zap.Uint64("commit-index", st.Commit),
+ )
+ } else {
+ plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
+ }
+
+ cl := membership.NewCluster(cfg.Logger, "")
+ cl.SetID(id, cid)
+ s := raft.NewMemoryStorage()
+ if snapshot != nil {
+ s.ApplySnapshot(*snapshot)
+ }
+ s.SetHardState(st)
+ s.Append(ents)
+ c := &raft.Config{
+ ID: uint64(id),
+ ElectionTick: cfg.ElectionTicks,
+ HeartbeatTick: 1,
+ Storage: s,
+ MaxSizePerMsg: maxSizePerMsg,
+ MaxInflightMsgs: maxInflightMsgs,
+ CheckQuorum: true,
+ PreVote: cfg.PreVote,
+ }
+ if cfg.Logger != nil {
+ // called after capnslog setting in "init" function
+ if cfg.LoggerConfig != nil {
+ c.Logger, err = logutil.NewRaftLogger(cfg.LoggerConfig)
+ if err != nil {
+ log.Fatalf("cannot create raft logger %v", err)
+ }
+ } else if cfg.LoggerCore != nil && cfg.LoggerWriteSyncer != nil {
+ c.Logger = logutil.NewRaftLoggerFromZapCore(cfg.LoggerCore, cfg.LoggerWriteSyncer)
+ }
+ }
+
+ n := raft.RestartNode(c)
+ raftStatus = n.Status
+ return id, cl, n, s, w
+}
+
+// getIDs returns an ordered set of IDs included in the given snapshot and
+// the entries. The given snapshot/entries can contain two kinds of
+// ID-related entry:
+// - ConfChangeAddNode, in which case the contained ID will be added into the set.
+// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
+func getIDs(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
+ ids := make(map[uint64]bool)
+ if snap != nil {
+ for _, id := range snap.Metadata.ConfState.Voters {
+ ids[id] = true
+ }
+ }
+ for _, e := range ents {
+ if e.Type != raftpb.EntryConfChange {
+ continue
+ }
+ var cc raftpb.ConfChange
+ pbutil.MustUnmarshal(&cc, e.Data)
+ switch cc.Type {
+ case raftpb.ConfChangeAddNode:
+ ids[cc.NodeID] = true
+ case raftpb.ConfChangeRemoveNode:
+ delete(ids, cc.NodeID)
+ case raftpb.ConfChangeUpdateNode:
+ // do nothing
+ default:
+ if lg != nil {
+ lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String()))
+ } else {
+ plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
+ }
+ }
+ }
+ sids := make(types.Uint64Slice, 0, len(ids))
+ for id := range ids {
+ sids = append(sids, id)
+ }
+ sort.Sort(sids)
+ return []uint64(sids)
+}
+
+// createConfigChangeEnts creates a series of Raft entries (i.e.
+// EntryConfChange) to remove the set of given IDs from the cluster. The ID
+// `self` is _not_ removed, even if present in the set.
+// If `self` is not inside the given ids, it creates a Raft entry to add a
+// default member with the given `self`.
+func createConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
+ found := false
+ for _, id := range ids {
+ if id == self {
+ found = true
+ }
+ }
+
+ var ents []raftpb.Entry
+ next := index + 1
+
+ // NB: always add self first, then remove other nodes. Raft will panic if the
+ // set of voters ever becomes empty.
+ if !found {
+ m := membership.Member{
+ ID: types.ID(self),
+ RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
+ }
+ ctx, err := json.Marshal(m)
+ if err != nil {
+ if lg != nil {
+ lg.Panic("failed to marshal member", zap.Error(err))
+ } else {
+ plog.Panicf("marshal member should never fail: %v", err)
+ }
+ }
+ cc := &raftpb.ConfChange{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: self,
+ Context: ctx,
+ }
+ e := raftpb.Entry{
+ Type: raftpb.EntryConfChange,
+ Data: pbutil.MustMarshal(cc),
+ Term: term,
+ Index: next,
+ }
+ ents = append(ents, e)
+ next++
+ }
+
+ for _, id := range ids {
+ if id == self {
+ continue
+ }
+ cc := &raftpb.ConfChange{
+ Type: raftpb.ConfChangeRemoveNode,
+ NodeID: id,
+ }
+ e := raftpb.Entry{
+ Type: raftpb.EntryConfChange,
+ Data: pbutil.MustMarshal(cc),
+ Term: term,
+ Index: next,
+ }
+ ents = append(ents, e)
+ next++
+ }
+
+ return ents
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/server.go b/vendor/go.etcd.io/etcd/etcdserver/server.go
new file mode 100644
index 000000000000..fb98c05b7c6d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/server.go
@@ -0,0 +1,2683 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+ "encoding/json"
+ "expvar"
+ "fmt"
+ "math"
+ "math/rand"
+ "net/http"
+ "os"
+ "path"
+ "regexp"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/coreos/pkg/capnslog"
+ humanize "github.com/dustin/go-humanize"
+ "github.com/prometheus/client_golang/prometheus"
+ "go.etcd.io/etcd/auth"
+ "go.etcd.io/etcd/etcdserver/api"
+ "go.etcd.io/etcd/etcdserver/api/membership"
+ "go.etcd.io/etcd/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/etcdserver/api/snap"
+ "go.etcd.io/etcd/etcdserver/api/v2discovery"
+ "go.etcd.io/etcd/etcdserver/api/v2http/httptypes"
+ stats "go.etcd.io/etcd/etcdserver/api/v2stats"
+ "go.etcd.io/etcd/etcdserver/api/v2store"
+ "go.etcd.io/etcd/etcdserver/api/v3alarm"
+ "go.etcd.io/etcd/etcdserver/api/v3compactor"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/lease"
+ "go.etcd.io/etcd/lease/leasehttp"
+ "go.etcd.io/etcd/mvcc"
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.etcd.io/etcd/pkg/fileutil"
+ "go.etcd.io/etcd/pkg/idutil"
+ "go.etcd.io/etcd/pkg/pbutil"
+ "go.etcd.io/etcd/pkg/runtime"
+ "go.etcd.io/etcd/pkg/schedule"
+ "go.etcd.io/etcd/pkg/traceutil"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/pkg/wait"
+ "go.etcd.io/etcd/raft"
+ "go.etcd.io/etcd/raft/raftpb"
+ "go.etcd.io/etcd/version"
+ "go.etcd.io/etcd/wal"
+ "go.uber.org/zap"
+)
+
+const (
+ DefaultSnapshotCount = 100000
+
+	// DefaultSnapshotCatchUpEntries is the number of entries to retain for a slow
+	// follower to catch up on after the raft storage entries are compacted.
+	// We expect the follower to have millisecond-level latency with the leader.
+	// The max throughput is around 10K, so keeping 5K entries is enough to help
+	// a follower catch up.
+ DefaultSnapshotCatchUpEntries uint64 = 5000
+
+ StoreClusterPrefix = "/0"
+ StoreKeysPrefix = "/1"
+
+ // HealthInterval is the minimum time the cluster should be healthy
+ // before accepting add member requests.
+ HealthInterval = 5 * time.Second
+
+ purgeFileInterval = 30 * time.Second
+	// monitorVersionInterval should be smaller than the timeout
+	// on the connection; otherwise the connection times out and
+	// cannot be reused.
+ monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second
+
+	// maxInFlightMsgSnap is the max number of in-flight snapshot messages etcdserver allows.
+ // This number is more than enough for most clusters with 5 machines.
+ maxInFlightMsgSnap = 16
+
+ releaseDelayAfterSnapshot = 30 * time.Second
+
+ // maxPendingRevokes is the maximum number of outstanding expired lease revocations.
+ maxPendingRevokes = 16
+
+ recommendedMaxRequestBytes = 10 * 1024 * 1024
+
+ readyPercent = 0.9
+)
+
+var (
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "etcdserver")
+
+ storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes"))
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+
+ expvar.Publish(
+ "file_descriptor_limit",
+ expvar.Func(
+ func() interface{} {
+ n, _ := runtime.FDLimit()
+ return n
+ },
+ ),
+ )
+}
+
+type Response struct {
+ Term uint64
+ Index uint64
+ Event *v2store.Event
+ Watcher v2store.Watcher
+ Err error
+}
+
+type ServerV2 interface {
+ Server
+ Leader() types.ID
+
+ // Do takes a V2 request and attempts to fulfill it, returning a Response.
+ Do(ctx context.Context, r pb.Request) (Response, error)
+ stats.Stats
+ ClientCertAuthEnabled() bool
+}
+
+type ServerV3 interface {
+ Server
+ RaftStatusGetter
+}
+
+func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled }
+
+type Server interface {
+ // AddMember attempts to add a member into the cluster. It will return
+ // ErrIDRemoved if member ID is removed from the cluster, or return
+ // ErrIDExists if member ID exists in the cluster.
+ AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error)
+ // RemoveMember attempts to remove a member from the cluster. It will
+ // return ErrIDRemoved if member ID is removed from the cluster, or return
+ // ErrIDNotFound if member ID is not in the cluster.
+ RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error)
+ // UpdateMember attempts to update an existing member in the cluster. It will
+ // return ErrIDNotFound if the member ID does not exist.
+ UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error)
+ // PromoteMember attempts to promote a non-voting node to a voting node. It will
+ // return ErrIDNotFound if the member ID does not exist.
+	// return ErrLearnerNotReady if the member is not ready.
+ // return ErrMemberNotLearner if the member is not a learner.
+ PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error)
+
+ // ClusterVersion is the cluster-wide minimum major.minor version.
+	// Cluster version is set to the min version that an etcd member is
+	// compatible with when the cluster is first bootstrapped.
+ //
+ // ClusterVersion is nil until the cluster is bootstrapped (has a quorum).
+ //
+	// During a rolling upgrade, the ClusterVersion is updated
+	// automatically after a sync (every 5 seconds by default).
+ //
+ // The API/raft component can utilize ClusterVersion to determine if
+ // it can accept a client request or a raft RPC.
+ // NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and
+	// the leader is etcd 2.0. An etcd 2.0 leader will not update clusterVersion since
+	// this feature was introduced after 2.0.
+ ClusterVersion() *semver.Version
+ Cluster() api.Cluster
+ Alarms() []*pb.AlarmMember
+}
+
+// EtcdServer is the production implementation of the Server interface
+type EtcdServer struct {
+	// inflightSnapshots holds the number of snapshots currently in flight.
+ inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned.
+ appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
+ committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
+ term uint64 // must use atomic operations to access; keep 64-bit aligned.
+ lead uint64 // must use atomic operations to access; keep 64-bit aligned.
+
+	// consistIndex holds the offset of the currently executing entry.
+	// It is initialized to 0 before any entry is executed.
+ consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned.
+ r raftNode // uses 64-bit atomics; keep 64-bit aligned.
+
+ readych chan struct{}
+ Cfg ServerConfig
+
+ lgMu *sync.RWMutex
+ lg *zap.Logger
+
+ w wait.Wait
+
+ readMu sync.RWMutex
+	// the read routine notifies the etcd server that it is waiting to read by
+	// sending an empty struct to readwaitc
+ readwaitc chan struct{}
+ // readNotifier is used to notify the read routine that it can process the request
+ // when there is no error
+ readNotifier *notifier
+
+	// stop signals the run goroutine should shut down.
+ stop chan struct{}
+ // stopping is closed by run goroutine on shutdown.
+ stopping chan struct{}
+ // done is closed when all goroutines from start() complete.
+ done chan struct{}
+ // leaderChanged is used to notify the linearizable read loop to drop the old read requests.
+ leaderChanged chan struct{}
+ leaderChangedMu sync.RWMutex
+
+ errorc chan error
+ id types.ID
+ attributes membership.Attributes
+
+ cluster *membership.RaftCluster
+
+ v2store v2store.Store
+ snapshotter *snap.Snapshotter
+
+ applyV2 ApplierV2
+
+ // applyV3 is the applier with auth and quotas
+ applyV3 applierV3
+ // applyV3Base is the core applier without auth or quotas
+ applyV3Base applierV3
+ applyWait wait.WaitTime
+
+ kv mvcc.ConsistentWatchableKV
+ lessor lease.Lessor
+ bemu sync.Mutex
+ be backend.Backend
+ authStore auth.AuthStore
+ alarmStore *v3alarm.AlarmStore
+
+ stats *stats.ServerStats
+ lstats *stats.LeaderStats
+
+ SyncTicker *time.Ticker
+ // compactor is used to auto-compact the KV.
+ compactor v3compactor.Compactor
+
+ // peerRt used to send requests (version, lease) to peers.
+ peerRt http.RoundTripper
+ reqIDGen *idutil.Generator
+
+ // forceVersionC is used to force the version monitor loop
+ // to detect the cluster version immediately.
+ forceVersionC chan struct{}
+
+ // wgMu blocks concurrent waitgroup mutation while server stopping
+ wgMu sync.RWMutex
+	// wg is used to wait for the goroutines that depend on the server state
+	// to exit when stopping the server.
+ wg sync.WaitGroup
+
+ // ctx is used for etcd-initiated requests that may need to be canceled
+ // on etcd server shutdown.
+ ctx context.Context
+ cancel context.CancelFunc
+
+ leadTimeMu sync.RWMutex
+ leadElectedTime time.Time
+
+ *AccessController
+}
+
+// NewServer creates a new EtcdServer from the supplied configuration. The
+// configuration is considered static for the lifetime of the EtcdServer.
+func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
+ st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
+
+ var (
+ w *wal.WAL
+ n raft.Node
+ s *raft.MemoryStorage
+ id types.ID
+ cl *membership.RaftCluster
+ )
+
+ if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
+ if cfg.Logger != nil {
+ cfg.Logger.Warn(
+ "exceeded recommended request limit",
+ zap.Uint("max-request-bytes", cfg.MaxRequestBytes),
+ zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))),
+ zap.Int("recommended-request-bytes", recommendedMaxRequestBytes),
+ zap.String("recommended-request-size", humanize.Bytes(uint64(recommendedMaxRequestBytes))),
+ )
+ } else {
+ plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes)
+ }
+ }
+
+ if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
+ return nil, fmt.Errorf("cannot access data directory: %v", terr)
+ }
+
+ haveWAL := wal.Exist(cfg.WALDir())
+
+ if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
+ if cfg.Logger != nil {
+ cfg.Logger.Fatal(
+ "failed to create snapshot directory",
+ zap.String("path", cfg.SnapDir()),
+ zap.Error(err),
+ )
+ } else {
+ plog.Fatalf("create snapshot directory error: %v", err)
+ }
+ }
+ ss := snap.New(cfg.Logger, cfg.SnapDir())
+
+ bepath := cfg.backendPath()
+ beExist := fileutil.Exist(bepath)
+ be := openBackend(cfg)
+
+ defer func() {
+ if err != nil {
+ be.Close()
+ }
+ }()
+
+ prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
+ if err != nil {
+ return nil, err
+ }
+ var (
+ remotes []*membership.Member
+ snapshot *raftpb.Snapshot
+ )
+
+ switch {
+ case !haveWAL && !cfg.NewCluster:
+ if err = cfg.VerifyJoinExisting(); err != nil {
+ return nil, err
+ }
+ cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
+ if err != nil {
+ return nil, err
+ }
+ existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt)
+ if gerr != nil {
+ return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
+ }
+ if err = membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil {
+ return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
+ }
+ if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt) {
+ return nil, fmt.Errorf("incompatible with current running cluster")
+ }
+
+ remotes = existingCluster.Members()
+ cl.SetID(types.ID(0), existingCluster.ID())
+ cl.SetStore(st)
+ cl.SetBackend(be)
+ id, n, s, w = startNode(cfg, cl, nil)
+ cl.SetID(id, existingCluster.ID())
+
+ case !haveWAL && cfg.NewCluster:
+ if err = cfg.VerifyBootstrap(); err != nil {
+ return nil, err
+ }
+ cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
+ if err != nil {
+ return nil, err
+ }
+ m := cl.MemberByName(cfg.Name)
+ if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
+ return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
+ }
+ if cfg.ShouldDiscover() {
+ var str string
+ str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
+ if err != nil {
+ return nil, &DiscoveryError{Op: "join", Err: err}
+ }
+ var urlsmap types.URLsMap
+ urlsmap, err = types.NewURLsMap(str)
+ if err != nil {
+ return nil, err
+ }
+ if checkDuplicateURL(urlsmap) {
+ return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
+ }
+ if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap); err != nil {
+ return nil, err
+ }
+ }
+ cl.SetStore(st)
+ cl.SetBackend(be)
+ id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
+ cl.SetID(id, cl.ID())
+
+ case haveWAL:
+ if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
+ return nil, fmt.Errorf("cannot write to member directory: %v", err)
+ }
+
+ if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
+ return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
+ }
+
+ if cfg.ShouldDiscover() {
+ if cfg.Logger != nil {
+ cfg.Logger.Warn(
+ "discovery token is ignored since cluster already initialized; valid logs are found",
+ zap.String("wal-dir", cfg.WALDir()),
+ )
+ } else {
+ plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
+ }
+ }
+
+ // Find a snapshot to start/restart a raft node
+ walSnaps, serr := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir())
+ if serr != nil {
+ return nil, serr
+ }
+ // snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding
+ // wal log entries
+ snapshot, err = ss.LoadNewestAvailable(walSnaps)
+ if err != nil && err != snap.ErrNoSnapshot {
+ return nil, err
+ }
+
+ if snapshot != nil {
+ if err = st.Recovery(snapshot.Data); err != nil {
+ if cfg.Logger != nil {
+ cfg.Logger.Panic("failed to recover from snapshot")
+ } else {
+ plog.Panicf("recovered store from snapshot error: %v", err)
+ }
+ }
+
+ if cfg.Logger != nil {
+ cfg.Logger.Info(
+ "recovered v2 store from snapshot",
+ zap.Uint64("snapshot-index", snapshot.Metadata.Index),
+ zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))),
+ )
+ } else {
+ plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
+ }
+
+ if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil {
+ if cfg.Logger != nil {
+ cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err))
+ } else {
+ plog.Panicf("recovering backend from snapshot error: %v", err)
+ }
+ }
+ if cfg.Logger != nil {
+ s1, s2 := be.Size(), be.SizeInUse()
+ cfg.Logger.Info(
+ "recovered v3 backend from snapshot",
+ zap.Int64("backend-size-bytes", s1),
+ zap.String("backend-size", humanize.Bytes(uint64(s1))),
+ zap.Int64("backend-size-in-use-bytes", s2),
+ zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))),
+ )
+ }
+ }
+
+ if !cfg.ForceNewCluster {
+ id, cl, n, s, w = restartNode(cfg, snapshot)
+ } else {
+ id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
+ }
+
+ cl.SetStore(st)
+ cl.SetBackend(be)
+ cl.Recover(api.UpdateCapability)
+ if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
+ os.RemoveAll(bepath)
+ return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
+ }
+
+ default:
+ return nil, fmt.Errorf("unsupported bootstrap config")
+ }
+
+ if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
+ return nil, fmt.Errorf("cannot access member directory: %v", terr)
+ }
+
+ sstats := stats.NewServerStats(cfg.Name, id.String())
+ lstats := stats.NewLeaderStats(id.String())
+
+ heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
+ srv = &EtcdServer{
+ readych: make(chan struct{}),
+ Cfg: cfg,
+ lgMu: new(sync.RWMutex),
+ lg: cfg.Logger,
+ errorc: make(chan error, 1),
+ v2store: st,
+ snapshotter: ss,
+ r: *newRaftNode(
+ raftNodeConfig{
+ lg: cfg.Logger,
+ isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
+ Node: n,
+ heartbeat: heartbeat,
+ raftStorage: s,
+ storage: NewStorage(w, ss),
+ },
+ ),
+ id: id,
+ attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
+ cluster: cl,
+ stats: sstats,
+ lstats: lstats,
+ SyncTicker: time.NewTicker(500 * time.Millisecond),
+ peerRt: prt,
+ reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
+ forceVersionC: make(chan struct{}),
+ AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist},
+ }
+ serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1)
+
+ srv.applyV2 = &applierV2store{store: srv.v2store, cluster: srv.cluster}
+
+ srv.be = be
+ minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
+
+ // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
+ // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
+ srv.lessor = lease.NewLessor(
+ srv.getLogger(),
+ srv.be,
+ lease.LessorConfig{
+ MinLeaseTTL: int64(math.Ceil(minTTL.Seconds())),
+ CheckpointInterval: cfg.LeaseCheckpointInterval,
+ ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(),
+ })
+
+ tp, err := auth.NewTokenProvider(cfg.Logger, cfg.AuthToken,
+ func(index uint64) <-chan struct{} {
+ return srv.applyWait.Wait(index)
+ },
+ )
+ if err != nil {
+ if cfg.Logger != nil {
+ cfg.Logger.Warn("failed to create token provider", zap.Error(err))
+ } else {
+			plog.Warningf("failed to create token provider, err is %v", err)
+ }
+ return nil, err
+ }
+ srv.authStore = auth.NewAuthStore(srv.getLogger(), srv.be, tp, int(cfg.BcryptCost))
+
+ srv.kv = mvcc.New(srv.getLogger(), srv.be, srv.lessor, srv.authStore, &srv.consistIndex, mvcc.StoreConfig{CompactionBatchLimit: cfg.CompactionBatchLimit})
+ if beExist {
+ kvindex := srv.kv.ConsistentIndex()
+ // TODO: remove kvindex != 0 checking when we do not expect users to upgrade
+ // etcd from pre-3.0 release.
+ if snapshot != nil && kvindex < snapshot.Metadata.Index {
+ if kvindex != 0 {
+ return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", bepath, kvindex, snapshot.Metadata.Index)
+ }
+ if cfg.Logger != nil {
+ cfg.Logger.Warn(
+ "consistent index was never saved",
+ zap.Uint64("snapshot-index", snapshot.Metadata.Index),
+ )
+ } else {
+ plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index)
+ }
+ }
+ }
+ newSrv := srv // since srv == nil in defer if srv is returned as nil
+ defer func() {
+ // closing backend without first closing kv can cause
+ // resumed compactions to fail with closed tx errors
+ if err != nil {
+ newSrv.kv.Close()
+ }
+ }()
+
+ srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
+ if num := cfg.AutoCompactionRetention; num != 0 {
+ srv.compactor, err = v3compactor.New(cfg.Logger, cfg.AutoCompactionMode, num, srv.kv, srv)
+ if err != nil {
+ return nil, err
+ }
+ srv.compactor.Run()
+ }
+
+ srv.applyV3Base = srv.newApplierV3Backend()
+ if err = srv.restoreAlarms(); err != nil {
+ return nil, err
+ }
+
+ if srv.Cfg.EnableLeaseCheckpoint {
+ // setting checkpointer enables lease checkpoint feature.
+ srv.lessor.SetCheckpointer(func(ctx context.Context, cp *pb.LeaseCheckpointRequest) {
+ srv.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseCheckpoint: cp})
+ })
+ }
+
+ // TODO: move transport initialization near the definition of remote
+ tr := &rafthttp.Transport{
+ Logger: cfg.Logger,
+ TLSInfo: cfg.PeerTLSInfo,
+ DialTimeout: cfg.peerDialTimeout(),
+ ID: id,
+ URLs: cfg.PeerURLs,
+ ClusterID: cl.ID(),
+ Raft: srv,
+ Snapshotter: ss,
+ ServerStats: sstats,
+ LeaderStats: lstats,
+ ErrorC: srv.errorc,
+ }
+ if err = tr.Start(); err != nil {
+ return nil, err
+ }
+ // add all remotes into transport
+ for _, m := range remotes {
+ if m.ID != id {
+ tr.AddRemote(m.ID, m.PeerURLs)
+ }
+ }
+ for _, m := range cl.Members() {
+ if m.ID != id {
+ tr.AddPeer(m.ID, m.PeerURLs)
+ }
+ }
+ srv.r.transport = tr
+
+ return srv, nil
+}
+
+func (s *EtcdServer) getLogger() *zap.Logger {
+ s.lgMu.RLock()
+ l := s.lg
+ s.lgMu.RUnlock()
+ return l
+}
+
+func tickToDur(ticks int, tickMs uint) string {
+ return fmt.Sprintf("%v", time.Duration(ticks)*time.Duration(tickMs)*time.Millisecond)
+}
+
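+// adjustTicks fast-forwards election ticks at startup so that a fresh
+// single-node cluster, or a member that has established peer connections,
+// can elect a leader without waiting for a full election timeout.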
+func (s *EtcdServer) adjustTicks() {
+ lg := s.getLogger()
+ clusterN := len(s.cluster.Members())
+
+ // single-node fresh start, or single-node recovers from snapshot
+ if clusterN == 1 {
+ ticks := s.Cfg.ElectionTicks - 1
+ if lg != nil {
+ lg.Info(
+ "started as single-node; fast-forwarding election ticks",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Int("forward-ticks", ticks),
+ zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
+ zap.Int("election-ticks", s.Cfg.ElectionTicks),
+ zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)),
+ )
+ } else {
+ plog.Infof("%s as single-node; fast-forwarding %d ticks (election ticks %d)", s.ID(), ticks, s.Cfg.ElectionTicks)
+ }
+ s.r.advanceTicks(ticks)
+ return
+ }
+
+ if !s.Cfg.InitialElectionTickAdvance {
+ if lg != nil {
+ lg.Info("skipping initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks))
+ }
+ return
+ }
+ if lg != nil {
+ lg.Info("starting initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks))
+ }
+
+	// retry for up to "rafthttp.ConnReadTimeout" (5 sec) until a peer
+	// connection reports activity; if instead:
+	// 1. all connections failed, or
+	// 2. there are no active peers, or
+	// 3. this is a restarted single node with no snapshot,
+	// then do nothing, because advancing ticks would have no effect
+ waitTime := rafthttp.ConnReadTimeout
+ itv := 50 * time.Millisecond
+ for i := int64(0); i < int64(waitTime/itv); i++ {
+ select {
+ case <-time.After(itv):
+ case <-s.stopping:
+ return
+ }
+
+ peerN := s.r.transport.ActivePeers()
+ if peerN > 1 {
+			// a multi-node cluster received peer connection reports;
+			// adjust ticks in case leader messages are received slowly
+ ticks := s.Cfg.ElectionTicks - 2
+
+ if lg != nil {
+ lg.Info(
+ "initialized peer connections; fast-forwarding election ticks",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Int("forward-ticks", ticks),
+ zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
+ zap.Int("election-ticks", s.Cfg.ElectionTicks),
+ zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)),
+ zap.Int("active-remote-members", peerN),
+ )
+ } else {
+ plog.Infof("%s initialized peer connection; fast-forwarding %d ticks (election ticks %d) with %d active peer(s)", s.ID(), ticks, s.Cfg.ElectionTicks, peerN)
+ }
+
+ s.r.advanceTicks(ticks)
+ return
+ }
+ }
+}
+
+// Start performs any initialization of the Server necessary for it to
+// begin serving requests. It must be called before Do or Process.
+// Start must be non-blocking; any long-running server functionality
+// should be implemented in goroutines.
+func (s *EtcdServer) Start() {
+ s.start()
+ s.goAttach(func() { s.adjustTicks() })
+ s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
+ s.goAttach(s.purgeFile)
+ s.goAttach(func() { monitorFileDescriptor(s.getLogger(), s.stopping) })
+ s.goAttach(s.monitorVersions)
+ s.goAttach(s.linearizableReadLoop)
+ s.goAttach(s.monitorKVHash)
+}
+
+// start prepares and starts server in a new goroutine. It is no longer safe to
+// modify a server's fields after it has been sent to Start.
+// This function is just used for testing.
+func (s *EtcdServer) start() {
+ lg := s.getLogger()
+
+ if s.Cfg.SnapshotCount == 0 {
+ if lg != nil {
+ lg.Info(
+ "updating snapshot-count to default",
+ zap.Uint64("given-snapshot-count", s.Cfg.SnapshotCount),
+ zap.Uint64("updated-snapshot-count", DefaultSnapshotCount),
+ )
+ } else {
+ plog.Infof("set snapshot count to default %d", DefaultSnapshotCount)
+ }
+ s.Cfg.SnapshotCount = DefaultSnapshotCount
+ }
+ if s.Cfg.SnapshotCatchUpEntries == 0 {
+ if lg != nil {
+ lg.Info(
+ "updating snapshot catch-up entries to default",
+ zap.Uint64("given-snapshot-catchup-entries", s.Cfg.SnapshotCatchUpEntries),
+ zap.Uint64("updated-snapshot-catchup-entries", DefaultSnapshotCatchUpEntries),
+ )
+ }
+ s.Cfg.SnapshotCatchUpEntries = DefaultSnapshotCatchUpEntries
+ }
+
+ s.w = wait.New()
+ s.applyWait = wait.NewTimeList()
+ s.done = make(chan struct{})
+ s.stop = make(chan struct{})
+ s.stopping = make(chan struct{})
+ s.ctx, s.cancel = context.WithCancel(context.Background())
+ s.readwaitc = make(chan struct{}, 1)
+ s.readNotifier = newNotifier()
+ s.leaderChanged = make(chan struct{})
+ if s.ClusterVersion() != nil {
+ if lg != nil {
+ lg.Info(
+ "starting etcd server",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("local-server-version", version.Version),
+ zap.String("cluster-id", s.Cluster().ID().String()),
+ zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())),
+ )
+ } else {
+ plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
+ }
+ membership.ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(s.ClusterVersion().String())}).Set(1)
+ } else {
+ if lg != nil {
+ lg.Info(
+ "starting etcd server",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("local-server-version", version.Version),
+ zap.String("cluster-version", "to_be_decided"),
+ )
+ } else {
+ plog.Infof("starting server... [version: %v, cluster version: to_be_decided]", version.Version)
+ }
+ }
+
+	// TODO: if this is an empty log, write all peer infos
+	// into the first entry
+ go s.run()
+}
+
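+// purgeFile purges old snap and wal files in the background, honoring
+// MaxSnapFiles/MaxWALFiles, until the server is stopping.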
+func (s *EtcdServer) purgeFile() {
+ var dberrc, serrc, werrc <-chan error
+ var dbdonec, sdonec, wdonec <-chan struct{}
+ if s.Cfg.MaxSnapFiles > 0 {
+ dbdonec, dberrc = fileutil.PurgeFileWithDoneNotify(s.getLogger(), s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
+ sdonec, serrc = fileutil.PurgeFileWithDoneNotify(s.getLogger(), s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
+ }
+ if s.Cfg.MaxWALFiles > 0 {
+ wdonec, werrc = fileutil.PurgeFileWithDoneNotify(s.getLogger(), s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.stopping)
+ }
+
+ lg := s.getLogger()
+ select {
+ case e := <-dberrc:
+ if lg != nil {
+ lg.Fatal("failed to purge snap db file", zap.Error(e))
+ } else {
+ plog.Fatalf("failed to purge snap db file %v", e)
+ }
+ case e := <-serrc:
+ if lg != nil {
+ lg.Fatal("failed to purge snap file", zap.Error(e))
+ } else {
+ plog.Fatalf("failed to purge snap file %v", e)
+ }
+ case e := <-werrc:
+ if lg != nil {
+ lg.Fatal("failed to purge wal file", zap.Error(e))
+ } else {
+ plog.Fatalf("failed to purge wal file %v", e)
+ }
+ case <-s.stopping:
+ if dbdonec != nil {
+ <-dbdonec
+ }
+ if sdonec != nil {
+ <-sdonec
+ }
+ if wdonec != nil {
+ <-wdonec
+ }
+ return
+ }
+}
+
+func (s *EtcdServer) Cluster() api.Cluster { return s.cluster }
+
+func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }
+
+type ServerPeer interface {
+ ServerV2
+ RaftHandler() http.Handler
+ LeaseHandler() http.Handler
+}
+
+func (s *EtcdServer) LeaseHandler() http.Handler {
+ if s.lessor == nil {
+ return nil
+ }
+ return leasehttp.NewHandler(s.lessor, s.ApplyWait)
+}
+
+func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() }
+
+// Process takes a raft message and applies it to the server's raft state
+// machine, respecting any timeout of the given context.
+func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
+ if s.cluster.IsIDRemoved(types.ID(m.From)) {
+ if lg := s.getLogger(); lg != nil {
+ lg.Warn(
+ "rejected Raft message from removed member",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("removed-member-id", types.ID(m.From).String()),
+ )
+ } else {
+ plog.Warningf("reject message from removed member %s", types.ID(m.From).String())
+ }
+ return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
+ }
+ if m.Type == raftpb.MsgApp {
+ s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
+ }
+ return s.r.Step(ctx, m)
+}
+
+func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) }
+
+func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) }
+
+// ReportSnapshot reports snapshot sent status to the raft state machine,
+// and clears the used snapshot from the snapshot store.
+func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
+ s.r.ReportSnapshot(id, status)
+}
+
+type etcdProgress struct {
+ confState raftpb.ConfState
+ snapi uint64
+ appliedt uint64
+ appliedi uint64
+}
+
+// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
+// and helps decouple state machine logic from Raft algorithms.
+// TODO: add a state machine interface to apply the commit entries and do snapshot/recover
+type raftReadyHandler struct {
+ getLead func() (lead uint64)
+ updateLead func(lead uint64)
+ updateLeadership func(newLeader bool)
+ updateCommittedIndex func(uint64)
+}
+
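+// run is the main server loop: it starts the raft node with its ready
+// handler, schedules apply batches in FIFO order, revokes expired leases,
+// triggers v2 syncs, and performs the ordered shutdown of the server's
+// subsystems on stop.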
+func (s *EtcdServer) run() {
+ lg := s.getLogger()
+
+ sn, err := s.r.raftStorage.Snapshot()
+ if err != nil {
+ if lg != nil {
+ lg.Panic("failed to get snapshot from Raft storage", zap.Error(err))
+ } else {
+ plog.Panicf("get snapshot from raft storage error: %v", err)
+ }
+ }
+
+ // asynchronously accept apply packets, dispatch progress in-order
+ sched := schedule.NewFIFOScheduler()
+
+ var (
+ smu sync.RWMutex
+ syncC <-chan time.Time
+ )
+ setSyncC := func(ch <-chan time.Time) {
+ smu.Lock()
+ syncC = ch
+ smu.Unlock()
+ }
+ getSyncC := func() (ch <-chan time.Time) {
+ smu.RLock()
+ ch = syncC
+ smu.RUnlock()
+ return
+ }
+ rh := &raftReadyHandler{
+ getLead: func() (lead uint64) { return s.getLead() },
+ updateLead: func(lead uint64) { s.setLead(lead) },
+ updateLeadership: func(newLeader bool) {
+ if !s.isLeader() {
+ if s.lessor != nil {
+ s.lessor.Demote()
+ }
+ if s.compactor != nil {
+ s.compactor.Pause()
+ }
+ setSyncC(nil)
+ } else {
+ if newLeader {
+ t := time.Now()
+ s.leadTimeMu.Lock()
+ s.leadElectedTime = t
+ s.leadTimeMu.Unlock()
+ }
+ setSyncC(s.SyncTicker.C)
+ if s.compactor != nil {
+ s.compactor.Resume()
+ }
+ }
+ if newLeader {
+ s.leaderChangedMu.Lock()
+ lc := s.leaderChanged
+ s.leaderChanged = make(chan struct{})
+ close(lc)
+ s.leaderChangedMu.Unlock()
+ }
+ // TODO: remove the nil checking
+ // current test utility does not provide the stats
+ if s.stats != nil {
+ s.stats.BecomeLeader()
+ }
+ },
+ updateCommittedIndex: func(ci uint64) {
+ cci := s.getCommittedIndex()
+ if ci > cci {
+ s.setCommittedIndex(ci)
+ }
+ },
+ }
+ s.r.start(rh)
+
+ ep := etcdProgress{
+ confState: sn.Metadata.ConfState,
+ snapi: sn.Metadata.Index,
+ appliedt: sn.Metadata.Term,
+ appliedi: sn.Metadata.Index,
+ }
+
+ defer func() {
+ s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping
+ close(s.stopping)
+ s.wgMu.Unlock()
+ s.cancel()
+
+ sched.Stop()
+
+		// wait for goroutines before closing raft so wal stays open
+ s.wg.Wait()
+
+ s.SyncTicker.Stop()
+
+		// must stop raft after scheduler; etcdserver can leak rafthttp pipelines
+		// by adding a peer after raft stops the transport
+ s.r.stop()
+
+ // kv, lessor and backend can be nil if running without v3 enabled
+ // or running unit tests.
+ if s.lessor != nil {
+ s.lessor.Stop()
+ }
+ if s.kv != nil {
+ s.kv.Close()
+ }
+ if s.authStore != nil {
+ s.authStore.Close()
+ }
+ if s.be != nil {
+ s.be.Close()
+ }
+ if s.compactor != nil {
+ s.compactor.Stop()
+ }
+ close(s.done)
+ }()
+
+ var expiredLeaseC <-chan []*lease.Lease
+ if s.lessor != nil {
+ expiredLeaseC = s.lessor.ExpiredLeasesC()
+ }
+
+ for {
+ select {
+ case ap := <-s.r.apply():
+ f := func(context.Context) { s.applyAll(&ep, &ap) }
+ sched.Schedule(f)
+ case leases := <-expiredLeaseC:
+ s.goAttach(func() {
+			// Increase throughput of the expired-lease deletion process through parallelization
+ c := make(chan struct{}, maxPendingRevokes)
+ for _, lease := range leases {
+ select {
+ case c <- struct{}{}:
+ case <-s.stopping:
+ return
+ }
+ lid := lease.ID
+ s.goAttach(func() {
+ ctx := s.authStore.WithRoot(s.ctx)
+ _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
+ if lerr == nil {
+ leaseExpired.Inc()
+ } else {
+ if lg != nil {
+ lg.Warn(
+ "failed to revoke lease",
+ zap.String("lease-id", fmt.Sprintf("%016x", lid)),
+ zap.Error(lerr),
+ )
+ } else {
+ plog.Warningf("failed to revoke %016x (%q)", lid, lerr.Error())
+ }
+ }
+
+ <-c
+ })
+ }
+ })
+ case err := <-s.errorc:
+ if lg != nil {
+ lg.Warn("server error", zap.Error(err))
+ lg.Warn("data-dir used by this member must be removed")
+ } else {
+ plog.Errorf("%s", err)
+ plog.Infof("the data-dir used by this member must be removed.")
+ }
+ return
+ case <-getSyncC():
+ if s.v2store.HasTTLKeys() {
+ s.sync(s.Cfg.ReqTimeout())
+ }
+ case <-s.stop:
+ return
+ }
+ }
+}
+
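+// applyAll applies a snapshot and any committed entries from a single apply
+// batch, signals waiters registered up to the new applied index, and then
+// considers triggering a snapshot once the raft routine has finished its
+// disk writes.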
+func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
+ s.applySnapshot(ep, apply)
+ s.applyEntries(ep, apply)
+
+ proposalsApplied.Set(float64(ep.appliedi))
+ s.applyWait.Trigger(ep.appliedi)
+
+	// wait for the raft routine to finish the disk writes before triggering a
+	// snapshot; otherwise the applied index might be greater than the last index
+	// in raft storage, since the raft routine might be slower than the apply routine.
+ <-apply.notifyc
+
+ s.triggerSnapshot(ep)
+ select {
+ // snapshot requested via send()
+ case m := <-s.r.msgSnapC:
+ merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
+ s.sendMergedSnap(merged)
+ default:
+ }
+}
+
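+// applySnapshot recovers the server state from an incoming leader snapshot:
+// it reopens the backend, restores the lessor, mvcc store, alarm store, auth
+// store, v2 store, and cluster configuration, and finally resets the raft
+// transport peers to match the recovered membership.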
+func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
+ if raft.IsEmptySnap(apply.snapshot) {
+ return
+ }
+ applySnapshotInProgress.Inc()
+
+ lg := s.getLogger()
+ if lg != nil {
+ lg.Info(
+ "applying snapshot",
+ zap.Uint64("current-snapshot-index", ep.snapi),
+ zap.Uint64("current-applied-index", ep.appliedi),
+ zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
+ zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
+ )
+ } else {
+ plog.Infof("applying snapshot at index %d...", ep.snapi)
+ }
+ defer func() {
+ if lg != nil {
+ lg.Info(
+ "applied snapshot",
+ zap.Uint64("current-snapshot-index", ep.snapi),
+ zap.Uint64("current-applied-index", ep.appliedi),
+ zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
+ zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
+ )
+ } else {
+ plog.Infof("finished applying incoming snapshot at index %d", ep.snapi)
+ }
+ applySnapshotInProgress.Dec()
+ }()
+
+ if apply.snapshot.Metadata.Index <= ep.appliedi {
+ if lg != nil {
+ lg.Panic(
+ "unexpected leader snapshot from outdated index",
+ zap.Uint64("current-snapshot-index", ep.snapi),
+ zap.Uint64("current-applied-index", ep.appliedi),
+ zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index),
+ zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term),
+ )
+ } else {
+ plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1",
+ apply.snapshot.Metadata.Index, ep.appliedi)
+ }
+ }
+
+ // wait for raftNode to persist snapshot onto the disk
+ <-apply.notifyc
+
+ newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot)
+ if err != nil {
+ if lg != nil {
+ lg.Panic("failed to open snapshot backend", zap.Error(err))
+ } else {
+ plog.Panic(err)
+ }
+ }
+
+ // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
+ // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
+ if s.lessor != nil {
+ if lg != nil {
+ lg.Info("restoring lease store")
+ } else {
+ plog.Info("recovering lessor...")
+ }
+
+ s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write(traceutil.TODO()) })
+
+ if lg != nil {
+ lg.Info("restored lease store")
+ } else {
+ plog.Info("finished recovering lessor")
+ }
+ }
+
+ if lg != nil {
+ lg.Info("restoring mvcc store")
+ } else {
+ plog.Info("restoring mvcc store...")
+ }
+
+ if err := s.kv.Restore(newbe); err != nil {
+ if lg != nil {
+ lg.Panic("failed to restore mvcc store", zap.Error(err))
+ } else {
+ plog.Panicf("restore KV error: %v", err)
+ }
+ }
+
+ s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex())
+ if lg != nil {
+ lg.Info("restored mvcc store")
+ } else {
+ plog.Info("finished restoring mvcc store")
+ }
+
+ // Closing old backend might block until all the txns
+ // on the backend are finished.
+ // We do not want to wait on closing the old backend.
+ s.bemu.Lock()
+ oldbe := s.be
+ go func() {
+ if lg != nil {
+ lg.Info("closing old backend file")
+ } else {
+ plog.Info("closing old backend...")
+ }
+ defer func() {
+ if lg != nil {
+ lg.Info("closed old backend file")
+ } else {
+ plog.Info("finished closing old backend")
+ }
+ }()
+ if err := oldbe.Close(); err != nil {
+ if lg != nil {
+ lg.Panic("failed to close old backend", zap.Error(err))
+ } else {
+ plog.Panicf("close backend error: %v", err)
+ }
+ }
+ }()
+
+ s.be = newbe
+ s.bemu.Unlock()
+
+ if lg != nil {
+ lg.Info("restoring alarm store")
+ } else {
+ plog.Info("recovering alarms...")
+ }
+
+ if err := s.restoreAlarms(); err != nil {
+ if lg != nil {
+ lg.Panic("failed to restore alarm store", zap.Error(err))
+ } else {
+ plog.Panicf("restore alarms error: %v", err)
+ }
+ }
+
+ if lg != nil {
+ lg.Info("restored alarm store")
+ } else {
+ plog.Info("finished recovering alarms")
+ }
+
+ if s.authStore != nil {
+ if lg != nil {
+ lg.Info("restoring auth store")
+ } else {
+ plog.Info("recovering auth store...")
+ }
+
+ s.authStore.Recover(newbe)
+
+ if lg != nil {
+ lg.Info("restored auth store")
+ } else {
+ plog.Info("finished recovering auth store")
+ }
+ }
+
+ if lg != nil {
+ lg.Info("restoring v2 store")
+ } else {
+ plog.Info("recovering store v2...")
+ }
+ if err := s.v2store.Recovery(apply.snapshot.Data); err != nil {
+ if lg != nil {
+ lg.Panic("failed to restore v2 store", zap.Error(err))
+ } else {
+ plog.Panicf("recovery store error: %v", err)
+ }
+ }
+
+ if lg != nil {
+ lg.Info("restored v2 store")
+ } else {
+ plog.Info("finished recovering store v2")
+ }
+
+ s.cluster.SetBackend(newbe)
+
+ if lg != nil {
+ lg.Info("restoring cluster configuration")
+ } else {
+ plog.Info("recovering cluster configuration...")
+ }
+
+ s.cluster.Recover(api.UpdateCapability)
+
+ if lg != nil {
+ lg.Info("restored cluster configuration")
+ lg.Info("removing old peers from network")
+ } else {
+ plog.Info("finished recovering cluster configuration")
+ plog.Info("removing old peers from network...")
+ }
+
+ // recover raft transport
+ s.r.transport.RemoveAllPeers()
+
+ if lg != nil {
+ lg.Info("removed old peers from network")
+ lg.Info("adding peers from new cluster configuration")
+ } else {
+ plog.Info("finished removing old peers from network")
+ plog.Info("adding peers from new cluster configuration into network...")
+ }
+
+ for _, m := range s.cluster.Members() {
+ if m.ID == s.ID() {
+ continue
+ }
+ s.r.transport.AddPeer(m.ID, m.PeerURLs)
+ }
+
+ if lg != nil {
+ lg.Info("added peers from new cluster configuration")
+ } else {
+ plog.Info("finished adding peers from new cluster configuration into network...")
+ }
+
+ ep.appliedt = apply.snapshot.Metadata.Term
+ ep.appliedi = apply.snapshot.Metadata.Index
+ ep.snapi = ep.appliedi
+ ep.confState = apply.snapshot.Metadata.ConfState
+}
+
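+// applyEntries applies the committed entries that follow the current applied
+// index, and schedules a delayed stop if the local member was removed from
+// the cluster.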
+func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
+ if len(apply.entries) == 0 {
+ return
+ }
+ firsti := apply.entries[0].Index
+ if firsti > ep.appliedi+1 {
+ if lg := s.getLogger(); lg != nil {
+ lg.Panic(
+ "unexpected committed entry index",
+ zap.Uint64("current-applied-index", ep.appliedi),
+ zap.Uint64("first-committed-entry-index", firsti),
+ )
+ } else {
+ plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi)
+ }
+ }
+ var ents []raftpb.Entry
+ if ep.appliedi+1-firsti < uint64(len(apply.entries)) {
+ ents = apply.entries[ep.appliedi+1-firsti:]
+ }
+ if len(ents) == 0 {
+ return
+ }
+ var shouldstop bool
+ if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
+ go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
+ }
+}
+
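+// triggerSnapshot creates a new snapshot once more than SnapshotCount entries
+// have been applied since the last one.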
+func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
+ if ep.appliedi-ep.snapi <= s.Cfg.SnapshotCount {
+ return
+ }
+
+ if lg := s.getLogger(); lg != nil {
+ lg.Info(
+ "triggering snapshot",
+ zap.String("local-member-id", s.ID().String()),
+ zap.Uint64("local-member-applied-index", ep.appliedi),
+ zap.Uint64("local-member-snapshot-index", ep.snapi),
+ zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount),
+ )
+ } else {
+ plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi)
+ }
+
+ s.snapshot(ep.appliedi, ep.confState)
+ ep.snapi = ep.appliedi
+}
+
+func (s *EtcdServer) hasMultipleVotingMembers() bool {
+ return s.cluster != nil && len(s.cluster.VotingMemberIDs()) > 1
+}
+
+func (s *EtcdServer) isLeader() bool {
+ return uint64(s.ID()) == s.Lead()
+}
+
+// MoveLeader transfers the leader to the given transferee.
+func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error {
+ if !s.cluster.IsMemberExist(types.ID(transferee)) || s.cluster.Member(types.ID(transferee)).IsLearner {
+ return ErrBadLeaderTransferee
+ }
+
+ now := time.Now()
+ interval := time.Duration(s.Cfg.TickMs) * time.Millisecond
+
+ if lg := s.getLogger(); lg != nil {
+ lg.Info(
+ "leadership transfer starting",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("current-leader-member-id", types.ID(lead).String()),
+ zap.String("transferee-member-id", types.ID(transferee).String()),
+ )
+ } else {
+ plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee))
+ }
+
+ s.r.TransferLeadership(ctx, lead, transferee)
+ for s.Lead() != transferee {
+ select {
+ case <-ctx.Done(): // time out
+ return ErrTimeoutLeaderTransfer
+ case <-time.After(interval):
+ }
+ }
+
+ // TODO: drain all requests, or drop all messages to the old leader
+ if lg := s.getLogger(); lg != nil {
+ lg.Info(
+ "leadership transfer finished",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("old-leader-member-id", types.ID(lead).String()),
+ zap.String("new-leader-member-id", types.ID(transferee).String()),
+ zap.Duration("took", time.Since(now)),
+ )
+ } else {
+ plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now))
+ }
+ return nil
+}
+
+// TransferLeadership transfers the leader to the chosen transferee.
+func (s *EtcdServer) TransferLeadership() error {
+ if !s.isLeader() {
+ if lg := s.getLogger(); lg != nil {
+ lg.Info(
+ "skipped leadership transfer; local server is not leader",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
+ )
+ } else {
+ plog.Printf("skipped leadership transfer for stopping non-leader member")
+ }
+ return nil
+ }
+
+ if !s.hasMultipleVotingMembers() {
+ if lg := s.getLogger(); lg != nil {
+ lg.Info(
+ "skipped leadership transfer for single voting member cluster",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
+ )
+ } else {
+ plog.Printf("skipped leadership transfer for single voting member cluster")
+ }
+ return nil
+ }
+
+ transferee, ok := longestConnected(s.r.transport, s.cluster.VotingMemberIDs())
+ if !ok {
+ return ErrUnhealthy
+ }
+
+ tm := s.Cfg.ReqTimeout()
+ ctx, cancel := context.WithTimeout(s.ctx, tm)
+ err := s.MoveLeader(ctx, s.Lead(), uint64(transferee))
+ cancel()
+ return err
+}
+
+// HardStop stops the server without coordination with other members in the cluster.
+func (s *EtcdServer) HardStop() {
+ select {
+ case s.stop <- struct{}{}:
+ case <-s.done:
+ return
+ }
+ <-s.done
+}
+
+// Stop stops the server gracefully, and shuts down the running goroutine.
+// Stop should be called after a Start(s), otherwise it will block forever.
+// When stopping leader, Stop transfers its leadership to one of its peers
+// before stopping the server.
+// Stop terminates the Server and performs any necessary finalization.
+// Do and Process cannot be called after Stop has been invoked.
+func (s *EtcdServer) Stop() {
+ if err := s.TransferLeadership(); err != nil {
+ if lg := s.getLogger(); lg != nil {
+ lg.Warn("leadership transfer failed", zap.String("local-member-id", s.ID().String()), zap.Error(err))
+ } else {
+ plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err)
+ }
+ }
+ s.HardStop()
+}
+
+// ReadyNotify returns a channel that will be closed when the server
+// is ready to serve client requests
+func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych }
+
+func (s *EtcdServer) stopWithDelay(d time.Duration, err error) {
+ select {
+ case <-time.After(d):
+ case <-s.done:
+ }
+ select {
+ case s.errorc <- err:
+ default:
+ }
+}
+
+// StopNotify returns a channel that receives an empty struct
+// when the server is stopped.
+func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }
+
+func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() }
+
+func (s *EtcdServer) LeaderStats() []byte {
+ lead := s.getLead()
+ if lead != uint64(s.id) {
+ return nil
+ }
+ return s.lstats.JSON()
+}
+
+func (s *EtcdServer) StoreStats() []byte { return s.v2store.JsonStats() }
+
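+// checkMembershipOperationPermission ensures the caller has root permission
+// before a membership operation is accepted.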
+func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
+ if s.authStore == nil {
+		// In an ordinary etcd process, s.authStore will never be nil.
+		// This branch handles cases in server_test.go
+ return nil
+ }
+
+	// Note that this permission check is done in the API layer,
+	// so a TOCTOU problem could potentially arise with a schedule like this:
+	// update membership with user A -> revoke root role of A -> apply membership change
+	// in the state machine layer.
+	// However, both membership changes and role management require the root privilege,
+	// so careful operation by admins can prevent the problem.
+ authInfo, err := s.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return err
+ }
+
+ return s.AuthStore().IsAdminPermitted(authInfo)
+}
+
+func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
+ if err := s.checkMembershipOperationPermission(ctx); err != nil {
+ return nil, err
+ }
+
+ // TODO: move Member to protobuf type
+ b, err := json.Marshal(memb)
+ if err != nil {
+ return nil, err
+ }
+
+ // by default StrictReconfigCheck is enabled; reject new members if unhealthy.
+ if err := s.mayAddMember(memb); err != nil {
+ return nil, err
+ }
+
+ cc := raftpb.ConfChange{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: uint64(memb.ID),
+ Context: b,
+ }
+
+ if memb.IsLearner {
+ cc.Type = raftpb.ConfChangeAddLearnerNode
+ }
+
+ return s.configure(ctx, cc)
+}
+
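+// mayAddMember rejects a member-add request under StrictReconfigCheck if the
+// cluster lacks enough started members or the local member has not been fully
+// connected to all peers within the health interval.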
+func (s *EtcdServer) mayAddMember(memb membership.Member) error {
+ if !s.Cfg.StrictReconfigCheck {
+ return nil
+ }
+
+ // protect quorum when adding voting member
+ if !memb.IsLearner && !s.cluster.IsReadyToAddVotingMember() {
+ if lg := s.getLogger(); lg != nil {
+ lg.Warn(
+ "rejecting member add request; not enough healthy members",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
+ zap.Error(ErrNotEnoughStartedMembers),
+ )
+ } else {
+ plog.Warningf("not enough started members, rejecting member add %+v", memb)
+ }
+ return ErrNotEnoughStartedMembers
+ }
+
+ if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.VotingMembers()) {
+ if lg := s.getLogger(); lg != nil {
+ lg.Warn(
+ "rejecting member add request; local member has not been connected to all peers, reconfigure breaks active quorum",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
+ zap.Error(ErrUnhealthy),
+ )
+ } else {
+ plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb)
+ }
+ return ErrUnhealthy
+ }
+
+ return nil
+}
+
+func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+ if err := s.checkMembershipOperationPermission(ctx); err != nil {
+ return nil, err
+ }
+
+ // by default StrictReconfigCheck is enabled; reject removal if leads to quorum loss
+ if err := s.mayRemoveMember(types.ID(id)); err != nil {
+ return nil, err
+ }
+
+ cc := raftpb.ConfChange{
+ Type: raftpb.ConfChangeRemoveNode,
+ NodeID: id,
+ }
+ return s.configure(ctx, cc)
+}
+
+// PromoteMember promotes a learner node to a voting node.
+func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+	// Only the raft leader has information on whether the to-be-promoted learner node is
+	// ready. If the promoteMember call fails with ErrNotLeader, forward the request to the
+	// leader node via HTTP. If it fails with any error other than ErrNotLeader, return the error.
+ resp, err := s.promoteMember(ctx, id)
+ if err == nil {
+ learnerPromoteSucceed.Inc()
+ return resp, nil
+ }
+ if err != ErrNotLeader {
+ learnerPromoteFailed.WithLabelValues(err.Error()).Inc()
+ return resp, err
+ }
+
+ cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+ defer cancel()
+ // forward to leader
+ for cctx.Err() == nil {
+ leader, err := s.waitLeader(cctx)
+ if err != nil {
+ return nil, err
+ }
+ for _, url := range leader.PeerURLs {
+ resp, err := promoteMemberHTTP(cctx, url, id, s.peerRt)
+ if err == nil {
+ return resp, nil
+ }
+			// If member promotion failed, return early. Otherwise keep retrying.
+ if err == ErrLearnerNotReady || err == membership.ErrIDNotFound || err == membership.ErrMemberNotLearner {
+ return nil, err
+ }
+ }
+ }
+
+ if cctx.Err() == context.DeadlineExceeded {
+ return nil, ErrTimeout
+ }
+ return nil, ErrCanceled
+}
+
+// promoteMember checks whether the to-be-promoted learner node is ready before sending the promote
+// request to raft.
+// It returns ErrNotLeader if the local node is not the raft leader (and therefore does not have
+// enough information to determine whether the learner node is ready), and ErrLearnerNotReady if the
+// local node is the leader (and therefore has enough information) but has decided that the learner
+// node is not ready to be promoted.
+func (s *EtcdServer) promoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+ if err := s.checkMembershipOperationPermission(ctx); err != nil {
+ return nil, err
+ }
+
+ // check if we can promote this learner.
+ if err := s.mayPromoteMember(types.ID(id)); err != nil {
+ return nil, err
+ }
+
+	// build the context for the promote confChange. IsLearner defaults to false; mark IsPromote as true.
+ promoteChangeContext := membership.ConfigChangeContext{
+ Member: membership.Member{
+ ID: types.ID(id),
+ },
+ IsPromote: true,
+ }
+
+ b, err := json.Marshal(promoteChangeContext)
+ if err != nil {
+ return nil, err
+ }
+
+ cc := raftpb.ConfChange{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: id,
+ Context: b,
+ }
+
+ return s.configure(ctx, cc)
+}
+
+func (s *EtcdServer) mayPromoteMember(id types.ID) error {
+ err := s.isLearnerReady(uint64(id))
+ if err != nil {
+ return err
+ }
+
+ if !s.Cfg.StrictReconfigCheck {
+ return nil
+ }
+ if !s.cluster.IsReadyToPromoteMember(uint64(id)) {
+ if lg := s.getLogger(); lg != nil {
+ lg.Warn(
+ "rejecting member promote request; not enough healthy members",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("requested-member-remove-id", id.String()),
+ zap.Error(ErrNotEnoughStartedMembers),
+ )
+ } else {
+ plog.Warningf("not enough started members, rejecting promote member %s", id)
+ }
+ return ErrNotEnoughStartedMembers
+ }
+
+ return nil
+}
+
+// isLearnerReady checks whether the learner has caught up with the leader.
+// Note: it returns nil if the member is not found in the cluster or is not a learner;
+// these two conditions are checked again later, before the apply phase.
+func (s *EtcdServer) isLearnerReady(id uint64) error {
+ rs := s.raftStatus()
+
+ // leader's raftStatus.Progress is not nil
+ if rs.Progress == nil {
+ return ErrNotLeader
+ }
+
+ var learnerMatch uint64
+ isFound := false
+ leaderID := rs.ID
+ for memberID, progress := range rs.Progress {
+ if id == memberID {
+ // check its status
+ learnerMatch = progress.Match
+ isFound = true
+ break
+ }
+ }
+
+ if isFound {
+ leaderMatch := rs.Progress[leaderID].Match
+		// the learner's Match has not caught up with the leader yet
+ if float64(learnerMatch) < float64(leaderMatch)*readyPercent {
+ return ErrLearnerNotReady
+ }
+ }
+
+ return nil
+}
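
For intuition, the readiness test compares the learner's match index against a fixed fraction of the leader's. A standalone sketch of the same comparison; the 0.9 threshold mirrors upstream etcd's readyPercent constant, an assumption here since the constant is defined elsewhere in the package:

```go
package main

import "fmt"

// learnerCaughtUp mirrors the comparison in isLearnerReady above.
func learnerCaughtUp(learnerMatch, leaderMatch uint64) bool {
	const readyPercent = 0.9 // assumed value of the package-level constant
	return float64(learnerMatch) >= float64(leaderMatch)*readyPercent
}

func main() {
	fmt.Println(learnerCaughtUp(950, 1000)) // true: within 90% of the leader
	fmt.Println(learnerCaughtUp(500, 1000)) // false: still catching up
}
```
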
+
+func (s *EtcdServer) mayRemoveMember(id types.ID) error {
+ if !s.Cfg.StrictReconfigCheck {
+ return nil
+ }
+
+ isLearner := s.cluster.IsMemberExist(id) && s.cluster.Member(id).IsLearner
+ // no need to check quorum when removing non-voting member
+ if isLearner {
+ return nil
+ }
+
+ if !s.cluster.IsReadyToRemoveVotingMember(uint64(id)) {
+ if lg := s.getLogger(); lg != nil {
+ lg.Warn(
+ "rejecting member remove request; not enough healthy members",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("requested-member-remove-id", id.String()),
+ zap.Error(ErrNotEnoughStartedMembers),
+ )
+ } else {
+ plog.Warningf("not enough started members, rejecting remove member %s", id)
+ }
+ return ErrNotEnoughStartedMembers
+ }
+
+ // downed member is safe to remove since it's not part of the active quorum
+ if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() {
+ return nil
+ }
+
+ // protect quorum if some members are down
+ m := s.cluster.VotingMembers()
+ active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m)
+ if (active - 1) < 1+((len(m)-1)/2) {
+ if lg := s.getLogger(); lg != nil {
+ lg.Warn(
+ "rejecting member remove request; local member has not been connected to all peers, reconfigure breaks active quorum",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("requested-member-remove", id.String()),
+ zap.Int("active-peers", active),
+ zap.Error(ErrUnhealthy),
+ )
+ } else {
+ plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id)
+ }
+ return ErrUnhealthy
+ }
+
+ return nil
+}
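
The final check above asks whether the survivors could still form a quorum of the shrunken cluster: removal is rejected when (active - 1) < 1 + ((len(m)-1)/2). A standalone sketch of the same arithmetic, with a worked example:

```go
package main

import "fmt"

// quorumSafeAfterRemoval mirrors the arithmetic in mayRemoveMember above:
// after removing one voting member from an n-member cluster, the remaining
// active members must still reach quorum of the n-1 survivors.
func quorumSafeAfterRemoval(active, n int) bool {
	return (active - 1) >= 1+((n-1)/2)
}

func main() {
	// 5-member cluster with only 3 active: removal leaves 2 active against
	// a quorum of 3, so the request is rejected.
	fmt.Println(quorumSafeAfterRemoval(3, 5)) // false
	// All 5 active: removal leaves 4 >= 3, so it is allowed.
	fmt.Println(quorumSafeAfterRemoval(5, 5)) // true
}
```
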
+
+func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
+ b, merr := json.Marshal(memb)
+ if merr != nil {
+ return nil, merr
+ }
+
+ if err := s.checkMembershipOperationPermission(ctx); err != nil {
+ return nil, err
+ }
+ cc := raftpb.ConfChange{
+ Type: raftpb.ConfChangeUpdateNode,
+ NodeID: uint64(memb.ID),
+ Context: b,
+ }
+ return s.configure(ctx, cc)
+}
+
+func (s *EtcdServer) setCommittedIndex(v uint64) {
+ atomic.StoreUint64(&s.committedIndex, v)
+}
+
+func (s *EtcdServer) getCommittedIndex() uint64 {
+ return atomic.LoadUint64(&s.committedIndex)
+}
+
+func (s *EtcdServer) setAppliedIndex(v uint64) {
+ atomic.StoreUint64(&s.appliedIndex, v)
+}
+
+func (s *EtcdServer) getAppliedIndex() uint64 {
+ return atomic.LoadUint64(&s.appliedIndex)
+}
+
+func (s *EtcdServer) setTerm(v uint64) {
+ atomic.StoreUint64(&s.term, v)
+}
+
+func (s *EtcdServer) getTerm() uint64 {
+ return atomic.LoadUint64(&s.term)
+}
+
+func (s *EtcdServer) setLead(v uint64) {
+ atomic.StoreUint64(&s.lead, v)
+}
+
+func (s *EtcdServer) getLead() uint64 {
+ return atomic.LoadUint64(&s.lead)
+}
+
+func (s *EtcdServer) leaderChangedNotify() <-chan struct{} {
+ s.leaderChangedMu.RLock()
+ defer s.leaderChangedMu.RUnlock()
+ return s.leaderChanged
+}
+
+// RaftStatusGetter represents etcd server and Raft progress.
+type RaftStatusGetter interface {
+ ID() types.ID
+ Leader() types.ID
+ CommittedIndex() uint64
+ AppliedIndex() uint64
+ Term() uint64
+}
+
+func (s *EtcdServer) ID() types.ID { return s.id }
+
+func (s *EtcdServer) Leader() types.ID { return types.ID(s.getLead()) }
+
+func (s *EtcdServer) Lead() uint64 { return s.getLead() }
+
+func (s *EtcdServer) CommittedIndex() uint64 { return s.getCommittedIndex() }
+
+func (s *EtcdServer) AppliedIndex() uint64 { return s.getAppliedIndex() }
+
+func (s *EtcdServer) Term() uint64 { return s.getTerm() }
+
+type confChangeResponse struct {
+ membs []*membership.Member
+ err error
+}
+
+// configure sends a configuration change through consensus and
+// then waits for it to be applied to the server. It
+// will block until the change is performed or there is an error.
+func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) {
+ cc.ID = s.reqIDGen.Next()
+ ch := s.w.Register(cc.ID)
+
+ start := time.Now()
+ if err := s.r.ProposeConfChange(ctx, cc); err != nil {
+ s.w.Trigger(cc.ID, nil)
+ return nil, err
+ }
+
+ select {
+ case x := <-ch:
+ if x == nil {
+ if lg := s.getLogger(); lg != nil {
+ lg.Panic("failed to configure")
+ } else {
+ plog.Panicf("configure trigger value should never be nil")
+ }
+ }
+ resp := x.(*confChangeResponse)
+ if lg := s.getLogger(); lg != nil {
+ lg.Info(
+ "applied a configuration change through raft",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("raft-conf-change", cc.Type.String()),
+ zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()),
+ )
+ }
+ return resp.membs, resp.err
+
+ case <-ctx.Done():
+ s.w.Trigger(cc.ID, nil) // GC wait
+ return nil, s.parseProposeCtxErr(ctx.Err(), start)
+
+ case <-s.stopping:
+ return nil, ErrStopped
+ }
+}
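
configure relies on a register/trigger rendezvous: the proposer registers a channel under the proposal ID, and the apply path later triggers that ID with the result. etcd's real primitive lives in pkg/wait; the following is only a minimal stand-in showing the shape of it:

```go
package main

import (
	"fmt"
	"sync"
)

// wait is a toy version of etcd's pkg/wait.Wait: Register returns a channel
// that a later Trigger on the same id completes with a result.
type wait struct {
	mu sync.Mutex
	m  map[uint64]chan interface{}
}

func newWait() *wait { return &wait{m: map[uint64]chan interface{}{}} }

func (w *wait) Register(id uint64) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan interface{}, 1) // buffered so Trigger never blocks
	w.m[id] = ch
	return ch
}

func (w *wait) Trigger(id uint64, x interface{}) {
	w.mu.Lock()
	ch := w.m[id]
	delete(w.m, id)
	w.mu.Unlock()
	if ch != nil {
		ch <- x
	}
}

func main() {
	w := newWait()
	ch := w.Register(42)        // proposer side: before proposing
	go w.Trigger(42, "applied") // apply side: once the entry commits
	fmt.Println(<-ch)
}
```
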
+
+// sync proposes a SYNC request and is non-blocking.
+// This makes no guarantee that the request will be proposed or performed.
+// The request will be canceled after the given timeout.
+func (s *EtcdServer) sync(timeout time.Duration) {
+ req := pb.Request{
+ Method: "SYNC",
+ ID: s.reqIDGen.Next(),
+ Time: time.Now().UnixNano(),
+ }
+ data := pbutil.MustMarshal(&req)
+	// There is no guarantee that the node has a leader when making a SYNC request,
+	// so it proposes in a goroutine.
+ ctx, cancel := context.WithTimeout(s.ctx, timeout)
+ s.goAttach(func() {
+ s.r.Propose(ctx, data)
+ cancel()
+ })
+}
+
+// publish registers server information into the cluster. The information
+// is the JSON representation of this server's member struct, updated with the
+// static clientURLs of the server.
+// The function keeps attempting to register until it succeeds,
+// or its server is stopped.
+//
+// Member attributes are encoded in the v2 store and applied through Raft,
+// but do not go through the v2 API endpoint, which means that even with the v2
+// client handler disabled (e.g. --enable-v2=false), the cluster can still
+// process publish requests through rafthttp.
+// TODO: Deprecate v2 store
+func (s *EtcdServer) publish(timeout time.Duration) {
+ b, err := json.Marshal(s.attributes)
+ if err != nil {
+ if lg := s.getLogger(); lg != nil {
+ lg.Panic("failed to marshal JSON", zap.Error(err))
+ } else {
+ plog.Panicf("json marshal error: %v", err)
+ }
+ return
+ }
+ req := pb.Request{
+ Method: "PUT",
+ Path: membership.MemberAttributesStorePath(s.id),
+ Val: string(b),
+ }
+
+ for {
+ ctx, cancel := context.WithTimeout(s.ctx, timeout)
+ _, err := s.Do(ctx, req)
+ cancel()
+ switch err {
+ case nil:
+ close(s.readych)
+ if lg := s.getLogger(); lg != nil {
+ lg.Info(
+ "published local member to cluster through raft",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
+ zap.String("request-path", req.Path),
+ zap.String("cluster-id", s.cluster.ID().String()),
+ zap.Duration("publish-timeout", timeout),
+ )
+ } else {
+ plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID())
+ }
+ return
+
+ case ErrStopped:
+ if lg := s.getLogger(); lg != nil {
+ lg.Warn(
+ "stopped publish because server is stopped",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
+ zap.Duration("publish-timeout", timeout),
+ zap.Error(err),
+ )
+ } else {
+ plog.Infof("aborting publish because server is stopped")
+ }
+ return
+
+ default:
+ if lg := s.getLogger(); lg != nil {
+ lg.Warn(
+ "failed to publish local member to cluster through raft",
+ zap.String("local-member-id", s.ID().String()),
+ zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
+ zap.String("request-path", req.Path),
+ zap.Duration("publish-timeout", timeout),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("publish error: %v", err)
+ }
+ }
+ }
+}
+
+func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
+ atomic.AddInt64(&s.inflightSnapshots, 1)
+
+ lg := s.getLogger()
+ fields := []zap.Field{
+ zap.String("from", s.ID().String()),
+ zap.String("to", types.ID(merged.To).String()),
+ zap.Int64("bytes", merged.TotalSize),
+ zap.String("size", humanize.Bytes(uint64(merged.TotalSize))),
+ }
+
+ now := time.Now()
+ s.r.transport.SendSnapshot(merged)
+ if lg != nil {
+ lg.Info("sending merged snapshot", fields...)
+ }
+
+ s.goAttach(func() {
+ select {
+ case ok := <-merged.CloseNotify():
+ // delay releasing inflight snapshot for another 30 seconds to
+ // block log compaction.
+ // If the follower still fails to catch up, it is probably just too slow
+ // to catch up. We cannot avoid the snapshot cycle anyway.
+ if ok {
+ select {
+ case <-time.After(releaseDelayAfterSnapshot):
+ case <-s.stopping:
+ }
+ }
+
+ atomic.AddInt64(&s.inflightSnapshots, -1)
+
+ if lg != nil {
+ lg.Info("sent merged snapshot", append(fields, zap.Duration("took", time.Since(now)))...)
+ }
+
+ case <-s.stopping:
+ if lg != nil {
+ lg.Warn("canceled sending merged snapshot; server stopping", fields...)
+ }
+ return
+ }
+ })
+}
+
+// apply takes entries received from Raft (after they have been committed) and
+// applies them to the current state of the EtcdServer.
+// The given entries should not be empty.
+func (s *EtcdServer) apply(
+ es []raftpb.Entry,
+ confState *raftpb.ConfState,
+) (appliedt uint64, appliedi uint64, shouldStop bool) {
+ for i := range es {
+ e := es[i]
+ switch e.Type {
+ case raftpb.EntryNormal:
+ s.applyEntryNormal(&e)
+ s.setAppliedIndex(e.Index)
+ s.setTerm(e.Term)
+
+ case raftpb.EntryConfChange:
+ // set the consistent index of current executing entry
+ if e.Index > s.consistIndex.ConsistentIndex() {
+ s.consistIndex.setConsistentIndex(e.Index)
+ }
+ var cc raftpb.ConfChange
+ pbutil.MustUnmarshal(&cc, e.Data)
+ removedSelf, err := s.applyConfChange(cc, confState)
+ s.setAppliedIndex(e.Index)
+ s.setTerm(e.Term)
+ shouldStop = shouldStop || removedSelf
+ s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err})
+
+ default:
+ if lg := s.getLogger(); lg != nil {
+ lg.Panic(
+ "unknown entry type; must be either EntryNormal or EntryConfChange",
+ zap.String("type", e.Type.String()),
+ )
+ } else {
+ plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
+ }
+ }
+ appliedi, appliedt = e.Index, e.Term
+ }
+ return appliedt, appliedi, shouldStop
+}
+
+// applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer.
+func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
+ shouldApplyV3 := false
+ if e.Index > s.consistIndex.ConsistentIndex() {
+ // set the consistent index of current executing entry
+ s.consistIndex.setConsistentIndex(e.Index)
+ shouldApplyV3 = true
+ }
+
+	// The raft state machine may generate a noop entry during leader confirmation.
+	// Skip it up front to avoid potential bugs in the future.
+ if len(e.Data) == 0 {
+ select {
+ case s.forceVersionC <- struct{}{}:
+ default:
+ }
+ // promote lessor when the local member is leader and finished
+ // applying all entries from the last term.
+ if s.isLeader() {
+ s.lessor.Promote(s.Cfg.electionTimeout())
+ }
+ return
+ }
+
+ var raftReq pb.InternalRaftRequest
+ if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible
+ var r pb.Request
+ rp := &r
+ pbutil.MustUnmarshal(rp, e.Data)
+ s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp)))
+ return
+ }
+ if raftReq.V2 != nil {
+ req := (*RequestV2)(raftReq.V2)
+ s.w.Trigger(req.ID, s.applyV2Request(req))
+ return
+ }
+
+ // do not re-apply applied entries.
+ if !shouldApplyV3 {
+ return
+ }
+
+ id := raftReq.ID
+ if id == 0 {
+ id = raftReq.Header.ID
+ }
+
+ var ar *applyResult
+ needResult := s.w.IsRegistered(id)
+ if needResult || !noSideEffect(&raftReq) {
+ if !needResult && raftReq.Txn != nil {
+ removeNeedlessRangeReqs(raftReq.Txn)
+ }
+ ar = s.applyV3.Apply(&raftReq)
+ }
+
+ if ar == nil {
+ return
+ }
+
+ if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
+ s.w.Trigger(id, ar)
+ return
+ }
+
+ if lg := s.getLogger(); lg != nil {
+ lg.Warn(
+ "message exceeded backend quota; raising alarm",
+ zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
+ zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
+ zap.Error(ar.err),
+ )
+ } else {
+ plog.Errorf("applying raft message exceeded backend quota")
+ }
+
+ s.goAttach(func() {
+ a := &pb.AlarmRequest{
+ MemberID: uint64(s.ID()),
+ Action: pb.AlarmRequest_ACTIVATE,
+ Alarm: pb.AlarmType_NOSPACE,
+ }
+ s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
+ s.w.Trigger(id, ar)
+ })
+}
+
+// applyConfChange applies a ConfChange to the server. It is only
+// invoked with a ConfChange that has already passed through Raft
+func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
+ if err := s.cluster.ValidateConfigurationChange(cc); err != nil {
+ cc.NodeID = raft.None
+ s.r.ApplyConfChange(cc)
+ return false, err
+ }
+
+ lg := s.getLogger()
+ *confState = *s.r.ApplyConfChange(cc)
+ switch cc.Type {
+ case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode:
+ confChangeContext := new(membership.ConfigChangeContext)
+ if err := json.Unmarshal(cc.Context, confChangeContext); err != nil {
+ if lg != nil {
+ lg.Panic("failed to unmarshal member", zap.Error(err))
+ } else {
+ plog.Panicf("unmarshal member should never fail: %v", err)
+ }
+ }
+ if cc.NodeID != uint64(confChangeContext.Member.ID) {
+ if lg != nil {
+ lg.Panic(
+ "got different member ID",
+ zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
+ zap.String("member-id-from-message", confChangeContext.Member.ID.String()),
+ )
+ } else {
+ plog.Panicf("nodeID should always be equal to member ID")
+ }
+ }
+ if confChangeContext.IsPromote {
+ s.cluster.PromoteMember(confChangeContext.Member.ID)
+ } else {
+ s.cluster.AddMember(&confChangeContext.Member)
+
+ if confChangeContext.Member.ID != s.id {
+ s.r.transport.AddPeer(confChangeContext.Member.ID, confChangeContext.PeerURLs)
+ }
+ }
+
+ // update the isLearner metric when this server id is equal to the id in raft member confChange
+ if confChangeContext.Member.ID == s.id {
+ if cc.Type == raftpb.ConfChangeAddLearnerNode {
+ isLearner.Set(1)
+ } else {
+ isLearner.Set(0)
+ }
+ }
+
+ case raftpb.ConfChangeRemoveNode:
+ id := types.ID(cc.NodeID)
+ s.cluster.RemoveMember(id)
+ if id == s.id {
+ return true, nil
+ }
+ s.r.transport.RemovePeer(id)
+
+ case raftpb.ConfChangeUpdateNode:
+ m := new(membership.Member)
+ if err := json.Unmarshal(cc.Context, m); err != nil {
+ if lg != nil {
+ lg.Panic("failed to unmarshal member", zap.Error(err))
+ } else {
+ plog.Panicf("unmarshal member should never fail: %v", err)
+ }
+ }
+ if cc.NodeID != uint64(m.ID) {
+ if lg != nil {
+ lg.Panic(
+ "got different member ID",
+ zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
+ zap.String("member-id-from-message", m.ID.String()),
+ )
+ } else {
+ plog.Panicf("nodeID should always be equal to member ID")
+ }
+ }
+ s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
+ if m.ID != s.id {
+ s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
+ }
+ }
+ return false, nil
+}
+
+// TODO: non-blocking snapshot
+func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
+ clone := s.v2store.Clone()
+	// commit kv to write metadata (for example: the consistent index) to disk.
+	// KV().Commit() updates the consistent index in the backend.
+	// All operations that update the consistent index must be called sequentially
+	// from the applyAll function.
+	// So KV().Commit() cannot run in parallel with apply; it has to be called outside
+	// the goroutine created below.
+ s.KV().Commit()
+
+ s.goAttach(func() {
+ lg := s.getLogger()
+
+ d, err := clone.SaveNoCopy()
+ // TODO: current store will never fail to do a snapshot
+ // what should we do if the store might fail?
+ if err != nil {
+ if lg != nil {
+ lg.Panic("failed to save v2 store", zap.Error(err))
+ } else {
+ plog.Panicf("store save should never fail: %v", err)
+ }
+ }
+ snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d)
+ if err != nil {
+			// the snapshot was done asynchronously with the progress of raft.
+			// raft might already have a newer snapshot.
+ if err == raft.ErrSnapOutOfDate {
+ return
+ }
+ if lg != nil {
+ lg.Panic("failed to create snapshot", zap.Error(err))
+ } else {
+ plog.Panicf("unexpected create snapshot error %v", err)
+ }
+ }
+ // SaveSnap saves the snapshot to file and appends the corresponding WAL entry.
+ if err = s.r.storage.SaveSnap(snap); err != nil {
+ if lg != nil {
+ lg.Panic("failed to save snapshot", zap.Error(err))
+ } else {
+ plog.Fatalf("save snapshot error: %v", err)
+ }
+ }
+ if lg != nil {
+ lg.Info(
+ "saved snapshot",
+ zap.Uint64("snapshot-index", snap.Metadata.Index),
+ )
+ } else {
+ plog.Infof("saved snapshot at index %d", snap.Metadata.Index)
+ }
+ if err = s.r.storage.Release(snap); err != nil {
+ if lg != nil {
+ lg.Panic("failed to release wal", zap.Error(err))
+ } else {
+ plog.Panicf("failed to release wal %v", err)
+ }
+ }
+
+		// While sending a snapshot, etcd pauses compaction.
+		// After receiving a snapshot, the slow follower needs all the entries right after
+		// the snapshot to catch up. If we did not pause compaction, those entries might
+		// already be compacted by the time the snapshot finishes sending and saving,
+		// which would trigger another snapshot-sending cycle.
+ if atomic.LoadInt64(&s.inflightSnapshots) != 0 {
+ if lg != nil {
+ lg.Info("skip compaction since there is an inflight snapshot")
+ } else {
+ plog.Infof("skip compaction since there is an inflight snapshot")
+ }
+ return
+ }
+
+ // keep some in memory log entries for slow followers.
+ compacti := uint64(1)
+ if snapi > s.Cfg.SnapshotCatchUpEntries {
+ compacti = snapi - s.Cfg.SnapshotCatchUpEntries
+ }
+
+ err = s.r.raftStorage.Compact(compacti)
+ if err != nil {
+			// the compaction was done asynchronously with the progress of raft.
+			// the raft log might already have been compacted.
+ if err == raft.ErrCompacted {
+ return
+ }
+ if lg != nil {
+ lg.Panic("failed to compact", zap.Error(err))
+ } else {
+ plog.Panicf("unexpected compaction error %v", err)
+ }
+ }
+ if lg != nil {
+ lg.Info(
+ "compacted Raft logs",
+ zap.Uint64("compact-index", compacti),
+ )
+ } else {
+ plog.Infof("compacted raft log at %d", compacti)
+ }
+ })
+}
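
The compaction index keeps the last Cfg.SnapshotCatchUpEntries entries in memory so a slow follower can catch up without forcing yet another snapshot. A worked sketch of the computation; the 5000 default mirrors upstream etcd's DefaultSnapshotCatchUpEntries, an assumption here:

```go
package main

import "fmt"

// compactIndex mirrors the computation in snapshot above: retain the last
// catchUp entries for slow followers, but never compact below index 1.
func compactIndex(snapi, catchUp uint64) uint64 {
	if snapi > catchUp {
		return snapi - catchUp
	}
	return 1
}

func main() {
	fmt.Println(compactIndex(100000, 5000)) // 95000
	fmt.Println(compactIndex(3000, 5000))   // 1: too few entries so far
}
```
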
+
+// CutPeer drops messages to the specified peer.
+func (s *EtcdServer) CutPeer(id types.ID) {
+ tr, ok := s.r.transport.(*rafthttp.Transport)
+ if ok {
+ tr.CutPeer(id)
+ }
+}
+
+// MendPeer recovers the message dropping behavior of the given peer.
+func (s *EtcdServer) MendPeer(id types.ID) {
+ tr, ok := s.r.transport.(*rafthttp.Transport)
+ if ok {
+ tr.MendPeer(id)
+ }
+}
+
+func (s *EtcdServer) PauseSending() { s.r.pauseSending() }
+
+func (s *EtcdServer) ResumeSending() { s.r.resumeSending() }
+
+func (s *EtcdServer) ClusterVersion() *semver.Version {
+ if s.cluster == nil {
+ return nil
+ }
+ return s.cluster.Version()
+}
+
+// monitorVersions checks the member's version every monitorVersionInterval.
+// It updates the cluster version if all members agree on a higher one.
+// It logs a message if there is a member with a higher version than the
+// local version.
+func (s *EtcdServer) monitorVersions() {
+ for {
+ select {
+ case <-s.forceVersionC:
+ case <-time.After(monitorVersionInterval):
+ case <-s.stopping:
+ return
+ }
+
+ if s.Leader() != s.ID() {
+ continue
+ }
+
+ v := decideClusterVersion(s.getLogger(), getVersions(s.getLogger(), s.cluster, s.id, s.peerRt))
+ if v != nil {
+ // only keep major.minor version for comparison
+ v = &semver.Version{
+ Major: v.Major,
+ Minor: v.Minor,
+ }
+ }
+
+ // if the current version is nil:
+ // 1. use the decided version if possible
+ // 2. or use the min cluster version
+ if s.cluster.Version() == nil {
+ verStr := version.MinClusterVersion
+ if v != nil {
+ verStr = v.String()
+ }
+ s.goAttach(func() { s.updateClusterVersion(verStr) })
+ continue
+ }
+
+ // update cluster version only if the decided version is greater than
+ // the current cluster version
+ if v != nil && s.cluster.Version().LessThan(*v) {
+ s.goAttach(func() { s.updateClusterVersion(v.String()) })
+ }
+ }
+}
+
+func (s *EtcdServer) updateClusterVersion(ver string) {
+ lg := s.getLogger()
+
+ if s.cluster.Version() == nil {
+ if lg != nil {
+ lg.Info(
+ "setting up initial cluster version",
+ zap.String("cluster-version", version.Cluster(ver)),
+ )
+ } else {
+ plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver))
+ }
+ } else {
+ if lg != nil {
+ lg.Info(
+ "updating cluster version",
+ zap.String("from", version.Cluster(s.cluster.Version().String())),
+ zap.String("to", version.Cluster(ver)),
+ )
+ } else {
+ plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver))
+ }
+ }
+
+ req := pb.Request{
+ Method: "PUT",
+ Path: membership.StoreClusterVersionKey(),
+ Val: ver,
+ }
+
+ ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout())
+ _, err := s.Do(ctx, req)
+ cancel()
+
+ switch err {
+ case nil:
+ if lg != nil {
+ lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver)))
+ }
+ return
+
+ case ErrStopped:
+ if lg != nil {
+ lg.Warn("aborting cluster version update; server is stopped", zap.Error(err))
+ } else {
+ plog.Infof("aborting update cluster version because server is stopped")
+ }
+ return
+
+ default:
+ if lg != nil {
+ lg.Warn("failed to update cluster version", zap.Error(err))
+ } else {
+ plog.Errorf("error updating cluster version (%v)", err)
+ }
+ }
+}
+
+func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
+ switch err {
+ case context.Canceled:
+ return ErrCanceled
+
+ case context.DeadlineExceeded:
+ s.leadTimeMu.RLock()
+ curLeadElected := s.leadElectedTime
+ s.leadTimeMu.RUnlock()
+ prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
+ if start.After(prevLeadLost) && start.Before(curLeadElected) {
+ return ErrTimeoutDueToLeaderFail
+ }
+ lead := types.ID(s.getLead())
+ switch lead {
+ case types.ID(raft.None):
+ // TODO: return error to specify it happens because the cluster does not have leader now
+ case s.ID():
+ if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) {
+ return ErrTimeoutDueToConnectionLost
+ }
+ default:
+ if !isConnectedSince(s.r.transport, start, lead) {
+ return ErrTimeoutDueToConnectionLost
+ }
+ }
+ return ErrTimeout
+
+ default:
+ return err
+ }
+}
+
+func (s *EtcdServer) KV() mvcc.ConsistentWatchableKV { return s.kv }
+func (s *EtcdServer) Backend() backend.Backend {
+ s.bemu.Lock()
+ defer s.bemu.Unlock()
+ return s.be
+}
+
+func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore }
+
+func (s *EtcdServer) restoreAlarms() error {
+ s.applyV3 = s.newApplierV3()
+ as, err := v3alarm.NewAlarmStore(s)
+ if err != nil {
+ return err
+ }
+ s.alarmStore = as
+ if len(as.Get(pb.AlarmType_NOSPACE)) > 0 {
+ s.applyV3 = newApplierV3Capped(s.applyV3)
+ }
+ if len(as.Get(pb.AlarmType_CORRUPT)) > 0 {
+ s.applyV3 = newApplierV3Corrupt(s.applyV3)
+ }
+ return nil
+}
+
+// goAttach creates a goroutine on a given function and tracks it using
+// the etcdserver waitgroup.
+func (s *EtcdServer) goAttach(f func()) {
+ s.wgMu.RLock() // this blocks with ongoing close(s.stopping)
+ defer s.wgMu.RUnlock()
+ select {
+ case <-s.stopping:
+ if lg := s.getLogger(); lg != nil {
+ lg.Warn("server has stopped; skipping goAttach")
+ } else {
+ plog.Warning("server has stopped (skipping goAttach)")
+ }
+ return
+ default:
+ }
+
+ // now safe to add since waitgroup wait has not started yet
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ f()
+ }()
+}
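
The wgMu/RWMutex pairing is the essential trick here: shutdown takes the write lock around close(s.stopping), while goAttach holds the read lock across the stopping check and wg.Add(1), so an attach either completes before the close or sees the channel closed, and the later wg.Wait() covers every attached goroutine. An isolated sketch of the pattern:

```go
package main

import (
	"fmt"
	"sync"
)

// tracker reproduces the goAttach pattern in isolation.
type tracker struct {
	mu       sync.RWMutex
	wg       sync.WaitGroup
	stopping chan struct{}
}

func (t *tracker) goAttach(f func()) bool {
	t.mu.RLock() // blocks while stop() is closing the channel
	defer t.mu.RUnlock()
	select {
	case <-t.stopping:
		return false // shutdown already begun; refuse new work
	default:
	}
	t.wg.Add(1) // safe: Wait has not started while we hold the read lock
	go func() { defer t.wg.Done(); f() }()
	return true
}

func (t *tracker) stop() {
	t.mu.Lock()
	close(t.stopping) // after this, goAttach refuses new work
	t.mu.Unlock()
	t.wg.Wait() // every previously attached goroutine finishes here
}

func main() {
	t := &tracker{stopping: make(chan struct{})}
	t.goAttach(func() { fmt.Println("background work") })
	t.stop()
	fmt.Println(t.goAttach(func() {})) // false: already stopped
}
```
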
+
+func (s *EtcdServer) Alarms() []*pb.AlarmMember {
+ return s.alarmStore.Get(pb.AlarmType_NONE)
+}
+
+func (s *EtcdServer) Logger() *zap.Logger {
+ return s.lg
+}
+
+// IsLearner returns whether the local member is a raft learner.
+func (s *EtcdServer) IsLearner() bool {
+ return s.cluster.IsLocalMemberLearner()
+}
+
+// IsMemberExist returns whether the member with the given id exists in the cluster.
+func (s *EtcdServer) IsMemberExist(id types.ID) bool {
+ return s.cluster.IsMemberExist(id)
+}
+
+// raftStatus returns the raft status of this etcd node.
+func (s *EtcdServer) raftStatus() raft.Status {
+ return s.r.Node.Status()
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/server_access_control.go b/vendor/go.etcd.io/etcd/etcdserver/server_access_control.go
new file mode 100644
index 000000000000..09e2255ccca9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/server_access_control.go
@@ -0,0 +1,65 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import "sync"
+
+// AccessController controls etcd server HTTP request access.
+type AccessController struct {
+ corsMu sync.RWMutex
+ CORS map[string]struct{}
+ hostWhitelistMu sync.RWMutex
+ HostWhitelist map[string]struct{}
+}
+
+// NewAccessController returns a new "AccessController" with default "*" values.
+func NewAccessController() *AccessController {
+ return &AccessController{
+ CORS: map[string]struct{}{"*": {}},
+ HostWhitelist: map[string]struct{}{"*": {}},
+ }
+}
+
+// OriginAllowed determines whether the server will allow a given CORS origin.
+// If CORS is empty, allow all.
+func (ac *AccessController) OriginAllowed(origin string) bool {
+ ac.corsMu.RLock()
+ defer ac.corsMu.RUnlock()
+ if len(ac.CORS) == 0 { // allow all
+ return true
+ }
+ _, ok := ac.CORS["*"]
+ if ok {
+ return true
+ }
+ _, ok = ac.CORS[origin]
+ return ok
+}
+
+// IsHostWhitelisted returns true if the host is whitelisted.
+// If whitelist is empty, allow all.
+func (ac *AccessController) IsHostWhitelisted(host string) bool {
+ ac.hostWhitelistMu.RLock()
+ defer ac.hostWhitelistMu.RUnlock()
+ if len(ac.HostWhitelist) == 0 { // allow all
+ return true
+ }
+ _, ok := ac.HostWhitelist["*"]
+ if ok {
+ return true
+ }
+ _, ok = ac.HostWhitelist[host]
+ return ok
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/snapshot_merge.go b/vendor/go.etcd.io/etcd/etcdserver/snapshot_merge.go
new file mode 100644
index 000000000000..41777681321f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/snapshot_merge.go
@@ -0,0 +1,100 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "io"
+
+ "go.etcd.io/etcd/etcdserver/api/snap"
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.etcd.io/etcd/raft/raftpb"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+)
+
+// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
+// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message
+// as ReadCloser.
+func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message {
+ // get a snapshot of v2 store as []byte
+ clone := s.v2store.Clone()
+ d, err := clone.SaveNoCopy()
+ if err != nil {
+ if lg := s.getLogger(); lg != nil {
+ lg.Panic("failed to save v2 store data", zap.Error(err))
+ } else {
+ plog.Panicf("store save should never fail: %v", err)
+ }
+ }
+
+ // commit kv to write metadata(for example: consistent index).
+ s.KV().Commit()
+ dbsnap := s.be.Snapshot()
+ // get a snapshot of v3 KV as readCloser
+ rc := newSnapshotReaderCloser(s.getLogger(), dbsnap)
+
+ // put the []byte snapshot of store into raft snapshot and return the merged snapshot with
+ // KV readCloser snapshot.
+ snapshot := raftpb.Snapshot{
+ Metadata: raftpb.SnapshotMetadata{
+ Index: snapi,
+ Term: snapt,
+ ConfState: confState,
+ },
+ Data: d,
+ }
+ m.Snapshot = snapshot
+
+ return *snap.NewMessage(m, rc, dbsnap.Size())
+}
+
+func newSnapshotReaderCloser(lg *zap.Logger, snapshot backend.Snapshot) io.ReadCloser {
+ pr, pw := io.Pipe()
+ go func() {
+ n, err := snapshot.WriteTo(pw)
+ if err == nil {
+ if lg != nil {
+ lg.Info(
+ "sent database snapshot to writer",
+ zap.Int64("bytes", n),
+ zap.String("size", humanize.Bytes(uint64(n))),
+ )
+ } else {
+ plog.Infof("wrote database snapshot out [total bytes: %d]", n)
+ }
+ } else {
+ if lg != nil {
+ lg.Warn(
+ "failed to send database snapshot to writer",
+ zap.String("size", humanize.Bytes(uint64(n))),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err)
+ }
+ }
+ pw.CloseWithError(err)
+ err = snapshot.Close()
+ if err != nil {
+ if lg != nil {
+ lg.Panic("failed to close database snapshot", zap.Error(err))
+ } else {
+ plog.Panicf("failed to close database snapshot: %v", err)
+ }
+ }
+ }()
+ return pr
+}
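
newSnapshotReaderCloser is an instance of a general Go pattern: expose a producer as an io.ReadCloser through io.Pipe, with the writer goroutine propagating its error to the reader via CloseWithError. A self-contained sketch of the same shape:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// pipeFrom streams src through an io.Pipe; the writer side runs in a
// goroutine and hands its error to the reader via CloseWithError.
func pipeFrom(src io.Reader) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		_, err := io.Copy(pw, src)
		pw.CloseWithError(err) // nil err closes cleanly; reader sees io.EOF
	}()
	return pr
}

func main() {
	rc := pipeFrom(strings.NewReader("snapshot bytes"))
	b, _ := io.ReadAll(rc)
	fmt.Println(string(b)) // snapshot bytes
}
```
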
diff --git a/vendor/go.etcd.io/etcd/etcdserver/storage.go b/vendor/go.etcd.io/etcd/etcdserver/storage.go
new file mode 100644
index 000000000000..9a000b8da667
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/storage.go
@@ -0,0 +1,133 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "io"
+
+ "go.etcd.io/etcd/etcdserver/api/snap"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/pkg/pbutil"
+ "go.etcd.io/etcd/pkg/types"
+ "go.etcd.io/etcd/raft/raftpb"
+ "go.etcd.io/etcd/wal"
+ "go.etcd.io/etcd/wal/walpb"
+
+ "go.uber.org/zap"
+)
+
+type Storage interface {
+ // Save function saves ents and state to the underlying stable storage.
+ // Save MUST block until st and ents are on stable storage.
+ Save(st raftpb.HardState, ents []raftpb.Entry) error
+ // SaveSnap function saves snapshot to the underlying stable storage.
+ SaveSnap(snap raftpb.Snapshot) error
+ // Close closes the Storage and performs finalization.
+ Close() error
+ // Release releases the locked wal files older than the provided snapshot.
+ Release(snap raftpb.Snapshot) error
+ // Sync WAL
+ Sync() error
+}
+
+type storage struct {
+ *wal.WAL
+ *snap.Snapshotter
+}
+
+func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage {
+ return &storage{w, s}
+}
+
+// SaveSnap saves the snapshot file to disk and writes the WAL snapshot entry.
+func (st *storage) SaveSnap(snap raftpb.Snapshot) error {
+ walsnap := walpb.Snapshot{
+ Index: snap.Metadata.Index,
+ Term: snap.Metadata.Term,
+ }
+ // save the snapshot file before writing the snapshot to the wal.
+ // This makes it possible for the snapshot file to become orphaned, but prevents
+ // a WAL snapshot entry from having no corresponding snapshot file.
+ err := st.Snapshotter.SaveSnap(snap)
+ if err != nil {
+ return err
+ }
+ // gofail: var raftBeforeWALSaveSnaphot struct{}
+
+ return st.WAL.SaveSnapshot(walsnap)
+}
+
+// Release releases resources that are older than the given snap and no longer needed:
+// - releases the locks to the wal files that are older than the provided wal for the given snap.
+// - deletes any .snap.db files that are older than the given snap.
+func (st *storage) Release(snap raftpb.Snapshot) error {
+ if err := st.WAL.ReleaseLockTo(snap.Metadata.Index); err != nil {
+ return err
+ }
+ return st.Snapshotter.ReleaseSnapDBs(snap)
+}
+
+// readWAL reads the WAL at the given snap and returns the wal, its latest HardState and cluster ID, and all entries that appear
+// after the position of the given snap in the WAL.
+// The snap must have been previously saved to the WAL, or this call will panic.
+func readWAL(lg *zap.Logger, waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
+ var (
+ err error
+ wmetadata []byte
+ )
+
+ repaired := false
+ for {
+ if w, err = wal.Open(lg, waldir, snap); err != nil {
+ if lg != nil {
+ lg.Fatal("failed to open WAL", zap.Error(err))
+ } else {
+ plog.Fatalf("open wal error: %v", err)
+ }
+ }
+ if wmetadata, st, ents, err = w.ReadAll(); err != nil {
+ w.Close()
+ // we can only repair ErrUnexpectedEOF and we never repair twice.
+ if repaired || err != io.ErrUnexpectedEOF {
+ if lg != nil {
+ lg.Fatal("failed to read WAL, cannot be repaired", zap.Error(err))
+ } else {
+ plog.Fatalf("read wal error (%v) and cannot be repaired", err)
+ }
+ }
+ if !wal.Repair(lg, waldir) {
+ if lg != nil {
+ lg.Fatal("failed to repair WAL", zap.Error(err))
+ } else {
+ plog.Fatalf("WAL error (%v) cannot be repaired", err)
+ }
+ } else {
+ if lg != nil {
+ lg.Info("repaired WAL", zap.Error(err))
+ } else {
+ plog.Infof("repaired WAL error (%v)", err)
+ }
+ repaired = true
+ }
+ continue
+ }
+ break
+ }
+ var metadata pb.Metadata
+ pbutil.MustUnmarshal(&metadata, wmetadata)
+ id = types.ID(metadata.NodeID)
+ cid = types.ID(metadata.ClusterID)
+ return w, id, cid, st, ents
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/util.go b/vendor/go.etcd.io/etcd/etcdserver/util.go
new file mode 100644
index 000000000000..ece1c2ce0869
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/util.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "go.etcd.io/etcd/etcdserver/api/membership"
+ "go.etcd.io/etcd/etcdserver/api/rafthttp"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/pkg/types"
+
+ "go.uber.org/zap"
+)
+
+// isConnectedToQuorumSince checks whether the local member is connected to the
+// quorum of the cluster since the given time.
+func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
+ return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1
+}
+
+// isConnectedSince checks whether the local member is connected to the
+// remote member since the given time.
+func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool {
+ t := transport.ActiveSince(remote)
+ return !t.IsZero() && t.Before(since)
+}
+
+// isConnectedFullySince checks whether the local member is connected to all
+// members in the cluster since the given time.
+func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
+ return numConnectedSince(transport, since, self, members) == len(members)
+}
+
+// numConnectedSince counts how many members are connected to the local member
+// since the given time.
+func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int {
+ connectedNum := 0
+ for _, m := range members {
+ if m.ID == self || isConnectedSince(transport, since, m.ID) {
+ connectedNum++
+ }
+ }
+ return connectedNum
+}
+
+// longestConnected chooses the member with the longest active-since time.
+// It returns false if nothing is active.
+func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) {
+ var longest types.ID
+ var oldest time.Time
+ for _, id := range membs {
+ tm := tp.ActiveSince(id)
+ if tm.IsZero() { // inactive
+ continue
+ }
+
+ if oldest.IsZero() { // first longest candidate
+ oldest = tm
+ longest = id
+ }
+
+ if tm.Before(oldest) {
+ oldest = tm
+ longest = id
+ }
+ }
+ if uint64(longest) == 0 {
+ return longest, false
+ }
+ return longest, true
+}
+
+type notifier struct {
+ c chan struct{}
+ err error
+}
+
+func newNotifier() *notifier {
+	return &notifier{
+ c: make(chan struct{}),
+ }
+}
+
+func (nc *notifier) notify(err error) {
+ nc.err = err
+ close(nc.c)
+}
+
+func warnOfExpensiveRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
+ var resp string
+ if !isNil(respMsg) {
+ resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
+ }
+ warnOfExpensiveGenericRequest(lg, now, reqStringer, "", resp, err)
+}
+
+func warnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
+ var resp string
+ if !isNil(respMsg) {
+ resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
+ }
+ d := time.Since(now)
+ if lg != nil {
+ lg.Warn(
+ "failed to apply request",
+ zap.Duration("took", d),
+ zap.String("request", reqStringer.String()),
+ zap.String("response", resp),
+ zap.Error(err),
+ )
+ } else {
+ plog.Warningf("failed to apply request %q with response %q took (%v) to execute, err is %v", reqStringer.String(), resp, d, err)
+ }
+}
+
+func warnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) {
+ reqStringer := pb.NewLoggableTxnRequest(r)
+ var resp string
+ if !isNil(txnResponse) {
+ var resps []string
+ for _, r := range txnResponse.Responses {
+ switch op := r.Response.(type) {
+ case *pb.ResponseOp_ResponseRange:
+ resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.ResponseRange.Kvs)))
+ default:
+ // only range responses should be in a read only txn request
+ }
+ }
+ resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), proto.Size(txnResponse))
+ }
+ warnOfExpensiveGenericRequest(lg, now, reqStringer, "read-only range ", resp, err)
+}
+
+func warnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) {
+ var resp string
+ if !isNil(rangeResponse) {
+ resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), proto.Size(rangeResponse))
+ }
+ warnOfExpensiveGenericRequest(lg, now, reqStringer, "read-only range ", resp, err)
+}
+
+func warnOfExpensiveGenericRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) {
+ d := time.Since(now)
+ if d > warnApplyDuration {
+ if lg != nil {
+ lg.Warn(
+ "apply request took too long",
+ zap.Duration("took", d),
+ zap.Duration("expected-duration", warnApplyDuration),
+ zap.String("prefix", prefix),
+ zap.String("request", reqStringer.String()),
+ zap.String("response", resp),
+ zap.Error(err),
+ )
+ } else {
+ var result string
+ if err != nil {
+ result = fmt.Sprintf("error:%v", err)
+ } else {
+ result = resp
+ }
+ plog.Warningf("%srequest %q with result %q took too long (%v) to execute", prefix, reqStringer.String(), result, d)
+ }
+ slowApplies.Inc()
+ }
+}
+
+func isNil(msg proto.Message) bool {
+ return msg == nil || reflect.ValueOf(msg).IsNil()
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/v2_server.go b/vendor/go.etcd.io/etcd/etcdserver/v2_server.go
new file mode 100644
index 000000000000..9238b2dc5803
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/v2_server.go
@@ -0,0 +1,165 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+ "time"
+
+ "go.etcd.io/etcd/etcdserver/api/v2store"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+)
+
+type RequestV2 pb.Request
+
+type RequestV2Handler interface {
+ Post(ctx context.Context, r *RequestV2) (Response, error)
+ Put(ctx context.Context, r *RequestV2) (Response, error)
+ Delete(ctx context.Context, r *RequestV2) (Response, error)
+ QGet(ctx context.Context, r *RequestV2) (Response, error)
+ Get(ctx context.Context, r *RequestV2) (Response, error)
+ Head(ctx context.Context, r *RequestV2) (Response, error)
+}
+
+type reqV2HandlerEtcdServer struct {
+ reqV2HandlerStore
+ s *EtcdServer
+}
+
+type reqV2HandlerStore struct {
+ store v2store.Store
+ applier ApplierV2
+}
+
+func NewStoreRequestV2Handler(s v2store.Store, applier ApplierV2) RequestV2Handler {
+ return &reqV2HandlerStore{s, applier}
+}
+
+func (a *reqV2HandlerStore) Post(ctx context.Context, r *RequestV2) (Response, error) {
+ return a.applier.Post(r), nil
+}
+
+func (a *reqV2HandlerStore) Put(ctx context.Context, r *RequestV2) (Response, error) {
+ return a.applier.Put(r), nil
+}
+
+func (a *reqV2HandlerStore) Delete(ctx context.Context, r *RequestV2) (Response, error) {
+ return a.applier.Delete(r), nil
+}
+
+func (a *reqV2HandlerStore) QGet(ctx context.Context, r *RequestV2) (Response, error) {
+ return a.applier.QGet(r), nil
+}
+
+func (a *reqV2HandlerStore) Get(ctx context.Context, r *RequestV2) (Response, error) {
+ if r.Wait {
+ wc, err := a.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
+ return Response{Watcher: wc}, err
+ }
+ ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted)
+ return Response{Event: ev}, err
+}
+
+func (a *reqV2HandlerStore) Head(ctx context.Context, r *RequestV2) (Response, error) {
+ ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted)
+ return Response{Event: ev}, err
+}
+
+func (a *reqV2HandlerEtcdServer) Post(ctx context.Context, r *RequestV2) (Response, error) {
+ return a.processRaftRequest(ctx, r)
+}
+
+func (a *reqV2HandlerEtcdServer) Put(ctx context.Context, r *RequestV2) (Response, error) {
+ return a.processRaftRequest(ctx, r)
+}
+
+func (a *reqV2HandlerEtcdServer) Delete(ctx context.Context, r *RequestV2) (Response, error) {
+ return a.processRaftRequest(ctx, r)
+}
+
+func (a *reqV2HandlerEtcdServer) QGet(ctx context.Context, r *RequestV2) (Response, error) {
+ return a.processRaftRequest(ctx, r)
+}
+
+func (a *reqV2HandlerEtcdServer) processRaftRequest(ctx context.Context, r *RequestV2) (Response, error) {
+ data, err := ((*pb.Request)(r)).Marshal()
+ if err != nil {
+ return Response{}, err
+ }
+ ch := a.s.w.Register(r.ID)
+
+ start := time.Now()
+ a.s.r.Propose(ctx, data)
+ proposalsPending.Inc()
+ defer proposalsPending.Dec()
+
+ select {
+ case x := <-ch:
+ resp := x.(Response)
+ return resp, resp.Err
+ case <-ctx.Done():
+ proposalsFailed.Inc()
+ a.s.w.Trigger(r.ID, nil) // GC wait
+ return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start)
+ case <-a.s.stopping:
+ }
+ return Response{}, ErrStopped
+}
+
+func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
+ r.ID = s.reqIDGen.Next()
+ h := &reqV2HandlerEtcdServer{
+ reqV2HandlerStore: reqV2HandlerStore{
+ store: s.v2store,
+ applier: s.applyV2,
+ },
+ s: s,
+ }
+ rp := &r
+ resp, err := ((*RequestV2)(rp)).Handle(ctx, h)
+ resp.Term, resp.Index = s.Term(), s.CommittedIndex()
+ return resp, err
+}
+
+// Handle interprets r and performs an operation on s.store according to r.Method
+// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
+// Quorum == true, r will be sent through consensus before performing its
+// respective operation. Do will block until an action is performed or there is
+// an error.
+func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Response, error) {
+ if r.Method == "GET" && r.Quorum {
+ r.Method = "QGET"
+ }
+ switch r.Method {
+ case "POST":
+ return v2api.Post(ctx, r)
+ case "PUT":
+ return v2api.Put(ctx, r)
+ case "DELETE":
+ return v2api.Delete(ctx, r)
+ case "QGET":
+ return v2api.QGet(ctx, r)
+ case "GET":
+ return v2api.Get(ctx, r)
+ case "HEAD":
+ return v2api.Head(ctx, r)
+ }
+ return Response{}, ErrUnknownMethod
+}
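
One consequence of the rewrite at the top of Handle is that a quorum GET takes the same consensus path as a write. A hypothetical in-package helper (not part of etcd) showing the dispatch:

```go
// quorumGet is a hypothetical helper: with Quorum set, Handle rewrites the
// GET to QGET, which reqV2HandlerEtcdServer routes through raft.
func quorumGet(ctx context.Context, h RequestV2Handler, path string) (Response, error) {
	r := &RequestV2{Method: "GET", Path: path, Quorum: true}
	return r.Handle(ctx, h)
}
```
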
+
+func (r *RequestV2) String() string {
+ rpb := pb.Request(*r)
+ return rpb.String()
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/v3_server.go b/vendor/go.etcd.io/etcd/etcdserver/v3_server.go
new file mode 100644
index 000000000000..70b7177d39e9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/etcdserver/v3_server.go
@@ -0,0 +1,805 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "time"
+
+ "go.etcd.io/etcd/auth"
+ "go.etcd.io/etcd/etcdserver/api/membership"
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/lease"
+ "go.etcd.io/etcd/lease/leasehttp"
+ "go.etcd.io/etcd/mvcc"
+ "go.etcd.io/etcd/pkg/traceutil"
+ "go.etcd.io/etcd/raft"
+
+ "github.com/gogo/protobuf/proto"
+ "go.uber.org/zap"
+)
+
+const (
+	// In the healthy case, there might be a small gap (tens of entries) between
+	// the applied index and the committed index.
+	// However, if the committed entries are very expensive to apply, the gap can grow.
+	// We should stop accepting new proposals if the gap grows beyond a certain point.
+ maxGapBetweenApplyAndCommitIndex = 5000
+ traceThreshold = 100 * time.Millisecond
+)
+
+type RaftKV interface {
+ Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error)
+ Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error)
+ DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
+ Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error)
+ Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
+}
+
+type Lessor interface {
+	// LeaseGrant sends a LeaseGrant request to raft and applies it once committed.
+	LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
+	// LeaseRevoke sends a LeaseRevoke request to raft and applies it once committed.
+	LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
+
+	// LeaseRenew renews the lease with the given ID and returns the renewed TTL,
+	// or an error.
+	LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error)
+
+ // LeaseTimeToLive retrieves lease information.
+ LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error)
+
+ // LeaseLeases lists all leases.
+ LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error)
+}
+
+type Authenticator interface {
+ AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error)
+ AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error)
+ Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error)
+ UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+ UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+ UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+ UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+ UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+ UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+ RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+ RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+ RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+ RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+ RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+ UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+ RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+}
+
+func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+ trace := traceutil.New("range",
+ s.getLogger(),
+ traceutil.Field{Key: "range_begin", Value: string(r.Key)},
+ traceutil.Field{Key: "range_end", Value: string(r.RangeEnd)},
+ )
+ ctx = context.WithValue(ctx, traceutil.TraceKey, trace)
+
+ var resp *pb.RangeResponse
+ var err error
+ defer func(start time.Time) {
+ warnOfExpensiveReadOnlyRangeRequest(s.getLogger(), start, r, resp, err)
+ if resp != nil {
+ trace.AddField(
+ traceutil.Field{Key: "response_count", Value: len(resp.Kvs)},
+ traceutil.Field{Key: "response_revision", Value: resp.Header.Revision},
+ )
+ }
+ trace.LogIfLong(traceThreshold)
+ }(time.Now())
+
+ if !r.Serializable {
+ err = s.linearizableReadNotify(ctx)
+ trace.Step("agreement among raft nodes before linearized reading")
+ if err != nil {
+ return nil, err
+ }
+ }
+ chk := func(ai *auth.AuthInfo) error {
+ return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
+ }
+
+ get := func() { resp, err = s.applyV3Base.Range(ctx, nil, r) }
+ if serr := s.doSerialize(ctx, chk, get); serr != nil {
+ err = serr
+ return nil, err
+ }
+ return resp, err
+}
+
+func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+ ctx = context.WithValue(ctx, traceutil.StartTimeKey, time.Now())
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.PutResponse), nil
+}
+
+func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.DeleteRangeResponse), nil
+}
+
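+// Txn applies a transaction. Read-only transactions are served from the
+// local applied state (after a linearizable read agreement unless every
+// range is serializable); transactions containing writes are proposed
+// through raft.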
+func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+ if isTxnReadonly(r) {
+ if !isTxnSerializable(r) {
+ err := s.linearizableReadNotify(ctx)
+ if err != nil {
+ return nil, err
+ }
+ }
+ var resp *pb.TxnResponse
+ var err error
+ chk := func(ai *auth.AuthInfo) error {
+ return checkTxnAuth(s.authStore, ai, r)
+ }
+
+ defer func(start time.Time) {
+ warnOfExpensiveReadOnlyTxnRequest(s.getLogger(), start, r, resp, err)
+ }(time.Now())
+
+ get := func() { resp, err = s.applyV3Base.Txn(r) }
+ if serr := s.doSerialize(ctx, chk, get); serr != nil {
+ return nil, serr
+ }
+ return resp, err
+ }
+
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.TxnResponse), nil
+}
+
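+// isTxnSerializable reports whether every operation in both branches of the
+// transaction is a serializable range request.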
+func isTxnSerializable(r *pb.TxnRequest) bool {
+ for _, u := range r.Success {
+ if r := u.GetRequestRange(); r == nil || !r.Serializable {
+ return false
+ }
+ }
+ for _, u := range r.Failure {
+ if r := u.GetRequestRange(); r == nil || !r.Serializable {
+ return false
+ }
+ }
+ return true
+}
+
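+// isTxnReadonly reports whether every operation in both branches of the
+// transaction is a range request, i.e. the transaction performs no writes.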
+func isTxnReadonly(r *pb.TxnRequest) bool {
+ for _, u := range r.Success {
+ if r := u.GetRequestRange(); r == nil {
+ return false
+ }
+ }
+ for _, u := range r.Failure {
+ if r := u.GetRequestRange(); r == nil {
+ return false
+ }
+ }
+ return true
+}
+
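+// Compact proposes a compaction through raft. For a physical compaction it
+// also waits until the keys are deleted, then forces a backend commit so
+// the compaction cannot resume after a crash.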
+func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
+ startTime := time.Now()
+ result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r})
+ trace := traceutil.TODO()
+ if result != nil && result.trace != nil {
+ trace = result.trace
+ defer func() {
+ trace.LogIfLong(traceThreshold)
+ }()
+ applyStart := result.trace.GetStartTime()
+ result.trace.SetStartTime(startTime)
+ trace.InsertStep(0, applyStart, "process raft request")
+ }
+ if r.Physical && result != nil && result.physc != nil {
+ <-result.physc
+ // The compaction is done deleting keys; the hash is now settled
+ // but the data is not necessarily committed. If there's a crash,
+ // the hash may revert to a hash prior to compaction completing
+ // if the compaction resumes. Force the finished compaction to
+ // commit so it won't resume following a crash.
+ s.be.ForceCommit()
+ trace.Step("physically apply compaction")
+ }
+ if err != nil {
+ return nil, err
+ }
+ if result.err != nil {
+ return nil, result.err
+ }
+ resp := result.resp.(*pb.CompactionResponse)
+ if resp == nil {
+ resp = &pb.CompactionResponse{}
+ }
+ if resp.Header == nil {
+ resp.Header = &pb.ResponseHeader{}
+ }
+ resp.Header.Revision = s.kv.Rev()
+ trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision})
+ return resp, nil
+}
+
+func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ // no ID given? choose one
+ for r.ID == int64(lease.NoLease) {
+ // only use positive int64 IDs
+ r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
+ }
+ resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.LeaseGrantResponse), nil
+}
+
+func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.LeaseRevokeResponse), nil
+}
+
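+// LeaseRenew renews the lease locally when this member is the primary
+// lessor (the leader); otherwise it forwards the renewal to the leader over
+// the peer HTTP transport, since renewals do not go through raft.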
+func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {
+ ttl, err := s.lessor.Renew(id)
+ if err == nil { // already requested to primary lessor(leader)
+ return ttl, nil
+ }
+ if err != lease.ErrNotPrimary {
+ return -1, err
+ }
+
+ cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+ defer cancel()
+
+ // renewals don't go through raft; forward to leader manually
+ for cctx.Err() == nil && err != nil {
+ leader, lerr := s.waitLeader(cctx)
+ if lerr != nil {
+ return -1, lerr
+ }
+ for _, url := range leader.PeerURLs {
+ lurl := url + leasehttp.LeasePrefix
+ ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt)
+ if err == nil || err == lease.ErrLeaseNotFound {
+ return ttl, err
+ }
+ }
+ }
+
+ if cctx.Err() == context.DeadlineExceeded {
+ return -1, ErrTimeout
+ }
+ return -1, ErrCanceled
+}
+
+func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+ if s.Leader() == s.ID() {
+ // primary; serve time-to-live directly from the local lessor
+ le := s.lessor.Lookup(lease.LeaseID(r.ID))
+ if le == nil {
+ return nil, lease.ErrLeaseNotFound
+ }
+ // TODO: fill out ResponseHeader
+ resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()}
+ if r.Keys {
+ ks := le.Keys()
+ kbs := make([][]byte, len(ks))
+ for i := range ks {
+ kbs[i] = []byte(ks[i])
+ }
+ resp.Keys = kbs
+ }
+ return resp, nil
+ }
+
+ cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+ defer cancel()
+
+ // forward to leader
+ for cctx.Err() == nil {
+ leader, err := s.waitLeader(cctx)
+ if err != nil {
+ return nil, err
+ }
+ for _, url := range leader.PeerURLs {
+ lurl := url + leasehttp.LeaseInternalPrefix
+ resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt)
+ if err == nil {
+ return resp.LeaseTimeToLiveResponse, nil
+ }
+ if err == lease.ErrLeaseNotFound {
+ return nil, err
+ }
+ }
+ }
+
+ if cctx.Err() == context.DeadlineExceeded {
+ return nil, ErrTimeout
+ }
+ return nil, ErrCanceled
+}
+
+func (s *EtcdServer) LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
+ ls := s.lessor.Leases()
+ lss := make([]*pb.LeaseStatus, len(ls))
+ for i := range ls {
+ lss[i] = &pb.LeaseStatus{ID: int64(ls[i].ID)}
+ }
+ return &pb.LeaseLeasesResponse{Header: newHeader(s), Leases: lss}, nil
+}
+
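+// waitLeader blocks until the cluster has a leader with advertised peer
+// URLs, retrying once per election timeout, or fails when the server stops
+// or the context is done.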
+func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
+ leader := s.cluster.Member(s.Leader())
+ for leader == nil {
+ // wait an election
+ dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond
+ select {
+ case <-time.After(dur):
+ leader = s.cluster.Member(s.Leader())
+ case <-s.stopping:
+ return nil, ErrStopped
+ case <-ctx.Done():
+ return nil, ErrNoLeader
+ }
+ }
+ if leader == nil || len(leader.PeerURLs) == 0 {
+ return nil, ErrNoLeader
+ }
+ return leader, nil
+}
+
+func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+ resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AlarmResponse), nil
+}
+
+func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
+ resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthEnableResponse), nil
+}
+
+func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthDisableResponse), nil
+}
+
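+// Authenticate checks the given password against the local auth store and,
+// on success, proposes an internal authenticate request through raft to
+// issue a token. If the auth store revision changes between the password
+// check and the raft apply, the check is retried so the token is never
+// issued from stale auth data.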
+func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
+ if err := s.linearizableReadNotify(ctx); err != nil {
+ return nil, err
+ }
+
+ lg := s.getLogger()
+
+ var resp proto.Message
+ for {
+ checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
+ if err != nil {
+ if err != auth.ErrAuthNotEnabled {
+ if lg != nil {
+ lg.Warn(
+ "invalid authentication was requested",
+ zap.String("user", r.Name),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("invalid authentication request to user %s was issued", r.Name)
+ }
+ }
+ return nil, err
+ }
+
+ st, err := s.AuthStore().GenTokenPrefix()
+ if err != nil {
+ return nil, err
+ }
+
+ internalReq := &pb.InternalAuthenticateRequest{
+ Name: r.Name,
+ Password: r.Password,
+ SimpleToken: st,
+ }
+
+ resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
+ if err != nil {
+ return nil, err
+ }
+ if checkedRevision == s.AuthStore().Revision() {
+ break
+ }
+
+ if lg != nil {
+ lg.Info("revision when password checked became stale; retrying")
+ } else {
+ plog.Infof("revision when password checked is obsolete, retrying")
+ }
+ }
+
+ return resp.(*pb.AuthenticateResponse), nil
+}
+
+func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserAddResponse), nil
+}
+
+func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserDeleteResponse), nil
+}
+
+func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserChangePasswordResponse), nil
+}
+
+func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserGrantRoleResponse), nil
+}
+
+func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserGetResponse), nil
+}
+
+func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserListResponse), nil
+}
+
+func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserRevokeRoleResponse), nil
+}
+
+func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleAddResponse), nil
+}
+
+func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleGrantPermissionResponse), nil
+}
+
+func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleGetResponse), nil
+}
+
+func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleListResponse), nil
+}
+
+func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleRevokePermissionResponse), nil
+}
+
+func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleDeleteResponse), nil
+}
+
+func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
+ result, err := s.processInternalRaftRequestOnce(ctx, r)
+ if err != nil {
+ return nil, err
+ }
+ if result.err != nil {
+ return nil, result.err
+ }
+ if startTime, ok := ctx.Value(traceutil.StartTimeKey).(time.Time); ok && result.trace != nil {
+ applyStart := result.trace.GetStartTime()
+ // The trace object is created in apply. Reset its start time to the
+ // request start time so that the raft request latency is captured as the
+ // difference between the request start time and the apply start time.
+ result.trace.SetStartTime(startTime)
+ result.trace.InsertStep(0, applyStart, "process raft request")
+ result.trace.LogIfLong(traceThreshold)
+ }
+ return result.resp, nil
+}
+
+func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
+ return s.raftRequestOnce(ctx, r)
+}
+
+// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure.
+func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
+ trace := traceutil.Get(ctx)
+ ai, err := s.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return err
+ }
+ if ai == nil {
+ // chk expects non-nil AuthInfo; use empty credentials
+ ai = &auth.AuthInfo{}
+ }
+ if err = chk(ai); err != nil {
+ return err
+ }
+ trace.Step("get authentication metadata")
+ // fetch response for serialized request
+ get()
+ // check for stale token revision in case the auth store was updated while
+ // the request was being handled.
+ if ai.Revision != 0 && ai.Revision != s.authStore.Revision() {
+ return auth.ErrAuthOldRevision
+ }
+ return nil
+}
+
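+// processInternalRaftRequestOnce marshals the request, proposes it through
+// raft, and waits for the applied result. It fails fast when the gap
+// between the committed and applied indexes is too large or when the
+// marshaled request exceeds the configured maximum request size.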
+func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
+ ai := s.getAppliedIndex()
+ ci := s.getCommittedIndex()
+ if ci > ai+maxGapBetweenApplyAndCommitIndex {
+ return nil, ErrTooManyRequests
+ }
+
+ r.Header = &pb.RequestHeader{
+ ID: s.reqIDGen.Next(),
+ }
+
+ authInfo, err := s.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if authInfo != nil {
+ r.Header.Username = authInfo.Username
+ r.Header.AuthRevision = authInfo.Revision
+ }
+
+ data, err := r.Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) > int(s.Cfg.MaxRequestBytes) {
+ return nil, ErrRequestTooLarge
+ }
+
+ id := r.ID
+ if id == 0 {
+ id = r.Header.ID
+ }
+ ch := s.w.Register(id)
+
+ cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+ defer cancel()
+
+ start := time.Now()
+ err = s.r.Propose(cctx, data)
+ if err != nil {
+ proposalsFailed.Inc()
+ s.w.Trigger(id, nil) // GC wait
+ return nil, err
+ }
+ proposalsPending.Inc()
+ defer proposalsPending.Dec()
+
+ select {
+ case x := <-ch:
+ return x.(*applyResult), nil
+ case <-cctx.Done():
+ proposalsFailed.Inc()
+ s.w.Trigger(id, nil) // GC wait
+ return nil, s.parseProposeCtxErr(cctx.Err(), start)
+ case <-s.done:
+ return nil, ErrStopped
+ }
+}
+
+// Watchable returns a watchable interface attached to the etcdserver.
+func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }
+
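+// linearizableReadLoop services linearizable reads: for each batch of
+// waiters it issues a raft ReadIndex request tagged with a unique ID, waits
+// for the matching read state, and notifies the waiters once the applied
+// index has caught up with the returned read index.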
+func (s *EtcdServer) linearizableReadLoop() {
+ var rs raft.ReadState
+
+ for {
+ ctxToSend := make([]byte, 8)
+ id1 := s.reqIDGen.Next()
+ binary.BigEndian.PutUint64(ctxToSend, id1)
+ leaderChangedNotifier := s.leaderChangedNotify()
+ select {
+ case <-leaderChangedNotifier:
+ continue
+ case <-s.readwaitc:
+ case <-s.stopping:
+ return
+ }
+
+ nextnr := newNotifier()
+
+ s.readMu.Lock()
+ nr := s.readNotifier
+ s.readNotifier = nextnr
+ s.readMu.Unlock()
+
+ lg := s.getLogger()
+ cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+ if err := s.r.ReadIndex(cctx, ctxToSend); err != nil {
+ cancel()
+ if err == raft.ErrStopped {
+ return
+ }
+ if lg != nil {
+ lg.Warn("failed to get read index from Raft", zap.Error(err))
+ } else {
+ plog.Errorf("failed to get read index from raft: %v", err)
+ }
+ readIndexFailed.Inc()
+ nr.notify(err)
+ continue
+ }
+ cancel()
+
+ var (
+ timeout bool
+ done bool
+ )
+ for !timeout && !done {
+ select {
+ case rs = <-s.r.readStateC:
+ done = bytes.Equal(rs.RequestCtx, ctxToSend)
+ if !done {
+ // a previous request might have timed out. Ignore its response and
+ // continue waiting for the response to the current request.
+ id2 := uint64(0)
+ if len(rs.RequestCtx) == 8 {
+ id2 = binary.BigEndian.Uint64(rs.RequestCtx)
+ }
+ if lg != nil {
+ lg.Warn(
+ "ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader",
+ zap.Uint64("sent-request-id", id1),
+ zap.Uint64("received-request-id", id2),
+ )
+ } else {
+ plog.Warningf("ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader (request ID want %d, got %d)", id1, id2)
+ }
+ slowReadIndex.Inc()
+ }
+ case <-leaderChangedNotifier:
+ timeout = true
+ readIndexFailed.Inc()
+ // return a retryable error.
+ nr.notify(ErrLeaderChanged)
+ case <-time.After(s.Cfg.ReqTimeout()):
+ if lg != nil {
+ lg.Warn("timed out waiting for read index response (local node might have slow network)", zap.Duration("timeout", s.Cfg.ReqTimeout()))
+ } else {
+ plog.Warningf("timed out waiting for read index response (local node might have slow network)")
+ }
+ nr.notify(ErrTimeout)
+ timeout = true
+ slowReadIndex.Inc()
+ case <-s.stopping:
+ return
+ }
+ }
+ if !done {
+ continue
+ }
+
+ if ai := s.getAppliedIndex(); ai < rs.Index {
+ select {
+ case <-s.applyWait.Wait(rs.Index):
+ case <-s.stopping:
+ return
+ }
+ }
+ // unblock all l-reads requested at indices before rs.Index
+ nr.notify(nil)
+ }
+}
+
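+// linearizableReadNotify signals the linearizable read loop, then blocks
+// until the current read state resolves, the context is done, or the server
+// stops.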
+func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error {
+ s.readMu.RLock()
+ nc := s.readNotifier
+ s.readMu.RUnlock()
+
+ // signal linearizable loop for current notify if it hasn't been already
+ select {
+ case s.readwaitc <- struct{}{}:
+ default:
+ }
+
+ // wait for read state notification
+ select {
+ case <-nc.c:
+ return nc.err
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-s.done:
+ return ErrStopped
+ }
+}
+
+func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) {
+ authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
+ if authInfo != nil || err != nil {
+ return authInfo, err
+ }
+ if !s.Cfg.ClientCertAuthEnabled {
+ return nil, nil
+ }
+ authInfo = s.AuthStore().AuthInfoFromTLS(ctx)
+ return authInfo, nil
+}
diff --git a/vendor/go.etcd.io/etcd/lease/doc.go b/vendor/go.etcd.io/etcd/lease/doc.go
new file mode 100644
index 000000000000..a74eaf76fc59
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/lease/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package lease provides an interface and implementation for time-limited leases over arbitrary resources.
+package lease
diff --git a/vendor/go.etcd.io/etcd/lease/lease_queue.go b/vendor/go.etcd.io/etcd/lease/lease_queue.go
new file mode 100644
index 000000000000..17ddb358e195
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/lease/lease_queue.go
@@ -0,0 +1,106 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lease
+
+import "container/heap"
+
+// LeaseWithTime associates a lease ID with a time.
+// For the lessor's lease heap, the time is the lease expiration time.
+// For the lessor's lease checkpoint heap, the time is the next lease checkpoint time.
+type LeaseWithTime struct {
+ id LeaseID
+ // Unix nanos timestamp.
+ time int64
+ index int
+}
+
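+// LeaseQueue implements heap.Interface as a min-heap of leases ordered by
+// time, keeping each item's index up to date so heap.Fix can be used on
+// updates.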
+type LeaseQueue []*LeaseWithTime
+
+func (pq LeaseQueue) Len() int { return len(pq) }
+
+func (pq LeaseQueue) Less(i, j int) bool {
+ return pq[i].time < pq[j].time
+}
+
+func (pq LeaseQueue) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+ pq[i].index = i
+ pq[j].index = j
+}
+
+func (pq *LeaseQueue) Push(x interface{}) {
+ n := len(*pq)
+ item := x.(*LeaseWithTime)
+ item.index = n
+ *pq = append(*pq, item)
+}
+
+func (pq *LeaseQueue) Pop() interface{} {
+ old := *pq
+ n := len(old)
+ item := old[n-1]
+ item.index = -1 // for safety
+ *pq = old[0 : n-1]
+ return item
+}
+
+// LeaseExpiredNotifier is a queue used to notify the lessor to revoke expired leases.
+// It keeps at most one item per lease; RegisterOrUpdate updates the time of the corresponding lease.
+type LeaseExpiredNotifier struct {
+ m map[LeaseID]*LeaseWithTime
+ queue LeaseQueue
+}
+
+func newLeaseExpiredNotifier() *LeaseExpiredNotifier {
+ return &LeaseExpiredNotifier{
+ m: make(map[LeaseID]*LeaseWithTime),
+ queue: make(LeaseQueue, 0),
+ }
+}
+
+func (mq *LeaseExpiredNotifier) Init() {
+ heap.Init(&mq.queue)
+ mq.m = make(map[LeaseID]*LeaseWithTime)
+ for _, item := range mq.queue {
+ mq.m[item.id] = item
+ }
+}
+
+func (mq *LeaseExpiredNotifier) RegisterOrUpdate(item *LeaseWithTime) {
+ if old, ok := mq.m[item.id]; ok {
+ old.time = item.time
+ heap.Fix(&mq.queue, old.index)
+ } else {
+ heap.Push(&mq.queue, item)
+ mq.m[item.id] = item
+ }
+}
+
+func (mq *LeaseExpiredNotifier) Unregister() *LeaseWithTime {
+ item := heap.Pop(&mq.queue).(*LeaseWithTime)
+ delete(mq.m, item.id)
+ return item
+}
+
+func (mq *LeaseExpiredNotifier) Poll() *LeaseWithTime {
+ if mq.Len() == 0 {
+ return nil
+ }
+ return mq.queue[0]
+}
+
+func (mq *LeaseExpiredNotifier) Len() int {
+ return len(mq.m)
+}
diff --git a/vendor/go.etcd.io/etcd/lease/leasehttp/doc.go b/vendor/go.etcd.io/etcd/lease/leasehttp/doc.go
new file mode 100644
index 000000000000..8177a37b663d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/lease/leasehttp/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package leasehttp serves lease renewals made through HTTP requests.
+package leasehttp
diff --git a/vendor/go.etcd.io/etcd/lease/leasehttp/http.go b/vendor/go.etcd.io/etcd/lease/leasehttp/http.go
new file mode 100644
index 000000000000..67e916dba9e5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/lease/leasehttp/http.go
@@ -0,0 +1,248 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package leasehttp
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/lease"
+ "go.etcd.io/etcd/lease/leasepb"
+ "go.etcd.io/etcd/pkg/httputil"
+)
+
+var (
+ LeasePrefix = "/leases"
+ LeaseInternalPrefix = "/leases/internal"
+ applyTimeout = time.Second
+ ErrLeaseHTTPTimeout = errors.New("waiting for node to catch up its applied index has timed out")
+)
+
+// NewHandler returns an http.Handler that serves lease renewals and internal lease time-to-live lookups
+func NewHandler(l lease.Lessor, waitch func() <-chan struct{}) http.Handler {
+ return &leaseHandler{l, waitch}
+}
+
+type leaseHandler struct {
+ l lease.Lessor
+ waitch func() <-chan struct{}
+}
+
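+// ServeHTTP handles POSTed protobuf lease requests: keep-alives on
+// LeasePrefix and internal time-to-live lookups on LeaseInternalPrefix.
+// Both paths wait for the local applied index to catch up before answering
+// and return 408 Request Timeout if it does not within applyTimeout.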
+func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "POST" {
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ defer r.Body.Close()
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, "error reading body", http.StatusBadRequest)
+ return
+ }
+
+ var v []byte
+ switch r.URL.Path {
+ case LeasePrefix:
+ lreq := pb.LeaseKeepAliveRequest{}
+ if uerr := lreq.Unmarshal(b); uerr != nil {
+ http.Error(w, "error unmarshalling request", http.StatusBadRequest)
+ return
+ }
+ select {
+ case <-h.waitch():
+ case <-time.After(applyTimeout):
+ http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
+ return
+ }
+ ttl, rerr := h.l.Renew(lease.LeaseID(lreq.ID))
+ if rerr != nil {
+ if rerr == lease.ErrLeaseNotFound {
+ http.Error(w, rerr.Error(), http.StatusNotFound)
+ return
+ }
+
+ http.Error(w, rerr.Error(), http.StatusBadRequest)
+ return
+ }
+ // TODO: fill out ResponseHeader
+ resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl}
+ v, err = resp.Marshal()
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ case LeaseInternalPrefix:
+ lreq := leasepb.LeaseInternalRequest{}
+ if lerr := lreq.Unmarshal(b); lerr != nil {
+ http.Error(w, "error unmarshalling request", http.StatusBadRequest)
+ return
+ }
+ select {
+ case <-h.waitch():
+ case <-time.After(applyTimeout):
+ http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
+ return
+ }
+ l := h.l.Lookup(lease.LeaseID(lreq.LeaseTimeToLiveRequest.ID))
+ if l == nil {
+ http.Error(w, lease.ErrLeaseNotFound.Error(), http.StatusNotFound)
+ return
+ }
+ // TODO: fill out ResponseHeader
+ resp := &leasepb.LeaseInternalResponse{
+ LeaseTimeToLiveResponse: &pb.LeaseTimeToLiveResponse{
+ Header: &pb.ResponseHeader{},
+ ID: lreq.LeaseTimeToLiveRequest.ID,
+ TTL: int64(l.Remaining().Seconds()),
+ GrantedTTL: l.TTL(),
+ },
+ }
+ if lreq.LeaseTimeToLiveRequest.Keys {
+ ks := l.Keys()
+ kbs := make([][]byte, len(ks))
+ for i := range ks {
+ kbs[i] = []byte(ks[i])
+ }
+ resp.LeaseTimeToLiveResponse.Keys = kbs
+ }
+
+ v, err = resp.Marshal()
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ default:
+ http.Error(w, fmt.Sprintf("unknown request path %q", r.URL.Path), http.StatusBadRequest)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/protobuf")
+ w.Write(v)
+}
+
+// RenewHTTP renews a lease at a given primary server.
+// TODO: Batch request in future?
+func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundTripper) (int64, error) {
+ // will post lreq protobuf to leader
+ lreq, err := (&pb.LeaseKeepAliveRequest{ID: int64(id)}).Marshal()
+ if err != nil {
+ return -1, err
+ }
+
+ cc := &http.Client{Transport: rt}
+ req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
+ if err != nil {
+ return -1, err
+ }
+ req.Header.Set("Content-Type", "application/protobuf")
+ req.Cancel = ctx.Done()
+
+ resp, err := cc.Do(req)
+ if err != nil {
+ return -1, err
+ }
+ b, err := readResponse(resp)
+ if err != nil {
+ return -1, err
+ }
+
+ if resp.StatusCode == http.StatusRequestTimeout {
+ return -1, ErrLeaseHTTPTimeout
+ }
+
+ if resp.StatusCode == http.StatusNotFound {
+ return -1, lease.ErrLeaseNotFound
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return -1, fmt.Errorf("lease: unknown error(%s)", string(b))
+ }
+
+ lresp := &pb.LeaseKeepAliveResponse{}
+ if err := lresp.Unmarshal(b); err != nil {
+ return -1, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
+ }
+ if lresp.ID != int64(id) {
+ return -1, fmt.Errorf("lease: renew id mismatch")
+ }
+ return lresp.TTL, nil
+}
+
+// TimeToLiveHTTP retrieves lease information of the given lease ID.
+func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string, rt http.RoundTripper) (*leasepb.LeaseInternalResponse, error) {
+ // will post lreq protobuf to leader
+ lreq, err := (&leasepb.LeaseInternalRequest{
+ LeaseTimeToLiveRequest: &pb.LeaseTimeToLiveRequest{
+ ID: int64(id),
+ Keys: keys,
+ },
+ }).Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/protobuf")
+
+ req = req.WithContext(ctx)
+
+ cc := &http.Client{Transport: rt}
+ var b []byte
+ resp, err := cc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ b, err = readResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode == http.StatusRequestTimeout {
+ return nil, ErrLeaseHTTPTimeout
+ }
+ if resp.StatusCode == http.StatusNotFound {
+ return nil, lease.ErrLeaseNotFound
+ }
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("lease: unknown error(%s)", string(b))
+ }
+
+ lresp := &leasepb.LeaseInternalResponse{}
+ if err := lresp.Unmarshal(b); err != nil {
+ return nil, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
+ }
+ if lresp.LeaseTimeToLiveResponse.ID != int64(id) {
+ return nil, fmt.Errorf("lease: time-to-live id mismatch")
+ }
+ return lresp, nil
+}
+
+func readResponse(resp *http.Response) (b []byte, err error) {
+ b, err = ioutil.ReadAll(resp.Body)
+ httputil.GracefulClose(resp)
+ return
+}
diff --git a/vendor/go.etcd.io/etcd/lease/leasepb/lease.pb.go b/vendor/go.etcd.io/etcd/lease/leasepb/lease.pb.go
new file mode 100644
index 000000000000..16637ee7e95d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/lease/leasepb/lease.pb.go
@@ -0,0 +1,620 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: lease.proto
+
+/*
+ Package leasepb is a generated protocol buffer package.
+
+ It is generated from these files:
+ lease.proto
+
+ It has these top-level messages:
+ Lease
+ LeaseInternalRequest
+ LeaseInternalResponse
+*/
+package leasepb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ etcdserverpb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Lease struct {
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"`
+ RemainingTTL int64 `protobuf:"varint,3,opt,name=RemainingTTL,proto3" json:"RemainingTTL,omitempty"`
+}
+
+func (m *Lease) Reset() { *m = Lease{} }
+func (m *Lease) String() string { return proto.CompactTextString(m) }
+func (*Lease) ProtoMessage() {}
+func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{0} }
+
+type LeaseInternalRequest struct {
+ LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest" json:"LeaseTimeToLiveRequest,omitempty"`
+}
+
+func (m *LeaseInternalRequest) Reset() { *m = LeaseInternalRequest{} }
+func (m *LeaseInternalRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseInternalRequest) ProtoMessage() {}
+func (*LeaseInternalRequest) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{1} }
+
+type LeaseInternalResponse struct {
+ LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse" json:"LeaseTimeToLiveResponse,omitempty"`
+}
+
+func (m *LeaseInternalResponse) Reset() { *m = LeaseInternalResponse{} }
+func (m *LeaseInternalResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseInternalResponse) ProtoMessage() {}
+func (*LeaseInternalResponse) Descriptor() ([]byte, []int) { return fileDescriptorLease, []int{2} }
+
+func init() {
+ proto.RegisterType((*Lease)(nil), "leasepb.Lease")
+ proto.RegisterType((*LeaseInternalRequest)(nil), "leasepb.LeaseInternalRequest")
+ proto.RegisterType((*LeaseInternalResponse)(nil), "leasepb.LeaseInternalResponse")
+}
+func (m *Lease) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Lease) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.ID != 0 {
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintLease(dAtA, i, uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintLease(dAtA, i, uint64(m.TTL))
+ }
+ if m.RemainingTTL != 0 {
+ dAtA[i] = 0x18
+ i++
+ i = encodeVarintLease(dAtA, i, uint64(m.RemainingTTL))
+ }
+ return i, nil
+}
+
+func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseInternalRequest) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.LeaseTimeToLiveRequest != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLease(dAtA, i, uint64(m.LeaseTimeToLiveRequest.Size()))
+ n1, err := m.LeaseTimeToLiveRequest.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n1
+ }
+ return i, nil
+}
+
+func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseInternalResponse) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ if m.LeaseTimeToLiveResponse != nil {
+ dAtA[i] = 0xa
+ i++
+ i = encodeVarintLease(dAtA, i, uint64(m.LeaseTimeToLiveResponse.Size()))
+ n2, err := m.LeaseTimeToLiveResponse.MarshalTo(dAtA[i:])
+ if err != nil {
+ return 0, err
+ }
+ i += n2
+ }
+ return i, nil
+}
+
+func encodeVarintLease(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *Lease) Size() (n int) {
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovLease(uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ n += 1 + sovLease(uint64(m.TTL))
+ }
+ if m.RemainingTTL != 0 {
+ n += 1 + sovLease(uint64(m.RemainingTTL))
+ }
+ return n
+}
+
+func (m *LeaseInternalRequest) Size() (n int) {
+ var l int
+ _ = l
+ if m.LeaseTimeToLiveRequest != nil {
+ l = m.LeaseTimeToLiveRequest.Size()
+ n += 1 + l + sovLease(uint64(l))
+ }
+ return n
+}
+
+func (m *LeaseInternalResponse) Size() (n int) {
+ var l int
+ _ = l
+ if m.LeaseTimeToLiveResponse != nil {
+ l = m.LeaseTimeToLiveResponse.Size()
+ n += 1 + l + sovLease(uint64(l))
+ }
+ return n
+}
+
+func sovLease(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozLease(x uint64) (n int) {
+ return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Lease) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Lease: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RemainingTTL", wireType)
+ }
+ m.RemainingTTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RemainingTTL |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLease(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLease
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseInternalRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveRequest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLease
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LeaseTimeToLiveRequest == nil {
+ m.LeaseTimeToLiveRequest = &etcdserverpb.LeaseTimeToLiveRequest{}
+ }
+ if err := m.LeaseTimeToLiveRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLease(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLease
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseInternalResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveResponse", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLease
+ }
+ postIndex := iNdEx + msglen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LeaseTimeToLiveResponse == nil {
+ m.LeaseTimeToLiveResponse = &etcdserverpb.LeaseTimeToLiveResponse{}
+ }
+ if err := m.LeaseTimeToLiveResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLease(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthLease
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipLease(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthLease
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipLease(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowLease = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("lease.proto", fileDescriptorLease) }
+
+var fileDescriptorLease = []byte{
+ // 253 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c,
+ 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2,
+ 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x2d, 0xb5, 0x24, 0x39, 0x45,
+ 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, 0x92,
+ 0x21, 0xea, 0x94, 0x7c, 0xb9, 0x58, 0x7d, 0x40, 0x06, 0x09, 0xf1, 0x71, 0x31, 0x79, 0xba, 0x48,
+ 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x31, 0x79, 0xba, 0x08, 0x09, 0x70, 0x31, 0x87, 0x84, 0xf8,
+ 0x48, 0x30, 0x81, 0x05, 0x40, 0x4c, 0x21, 0x25, 0x2e, 0x9e, 0xa0, 0xd4, 0xdc, 0xc4, 0xcc, 0xbc,
+ 0xcc, 0xbc, 0x74, 0x90, 0x14, 0x33, 0x58, 0x0a, 0x45, 0x4c, 0xa9, 0x84, 0x4b, 0x04, 0x6c, 0x9c,
+ 0x67, 0x5e, 0x49, 0x6a, 0x51, 0x5e, 0x62, 0x4e, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x50,
+ 0x0c, 0x97, 0x18, 0x58, 0x3c, 0x24, 0x33, 0x37, 0x35, 0x24, 0xdf, 0x27, 0xb3, 0x2c, 0x15, 0x2a,
+ 0x03, 0xb6, 0x91, 0xdb, 0x48, 0x45, 0x0f, 0xd9, 0x7d, 0x7a, 0xd8, 0xd5, 0x06, 0xe1, 0x30, 0x43,
+ 0xa9, 0x82, 0x4b, 0x14, 0xcd, 0xd6, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0xa1, 0x78, 0x2e, 0x71,
+ 0x0c, 0x2d, 0x10, 0x29, 0xa8, 0xbd, 0xaa, 0x04, 0xec, 0x85, 0x28, 0x0e, 0xc2, 0x65, 0x8a, 0x93,
+ 0xc4, 0x89, 0x87, 0x72, 0x0c, 0x17, 0x1e, 0xca, 0x31, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91,
+ 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x33, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xc3, 0xd7, 0x18,
+ 0x10, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x9f, 0x8b, 0x6c, 0xb5, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/go.etcd.io/etcd/lease/leasepb/lease.proto b/vendor/go.etcd.io/etcd/lease/leasepb/lease.proto
new file mode 100644
index 000000000000..1169d9f10a98
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/lease/leasepb/lease.proto
@@ -0,0 +1,25 @@
+syntax = "proto3";
+package leasepb;
+
+import "gogoproto/gogo.proto";
+import "etcd/etcdserver/etcdserverpb/rpc.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+message Lease {
+ int64 ID = 1;
+ int64 TTL = 2;
+ int64 RemainingTTL = 3;
+}
+
+message LeaseInternalRequest {
+ etcdserverpb.LeaseTimeToLiveRequest LeaseTimeToLiveRequest = 1;
+}
+
+message LeaseInternalResponse {
+ etcdserverpb.LeaseTimeToLiveResponse LeaseTimeToLiveResponse = 1;
+}
diff --git a/vendor/go.etcd.io/etcd/lease/lessor.go b/vendor/go.etcd.io/etcd/lease/lessor.go
new file mode 100644
index 000000000000..b16099fbf1fb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/lease/lessor.go
@@ -0,0 +1,938 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lease
+
+import (
+ "container/heap"
+ "context"
+ "encoding/binary"
+ "errors"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "go.etcd.io/etcd/lease/leasepb"
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.uber.org/zap"
+)
+
+// NoLease is a special LeaseID representing the absence of a lease.
+const NoLease = LeaseID(0)
+
+// MaxLeaseTTL is the maximum lease TTL value
+const MaxLeaseTTL = 9000000000
+
+var (
+ forever = time.Time{}
+
+ leaseBucketName = []byte("lease")
+
+ // maximum number of leases to revoke per second; configurable for tests
+ leaseRevokeRate = 1000
+
+ // maximum number of lease checkpoints recorded to the consensus log per second; configurable for tests
+ leaseCheckpointRate = 1000
+
+ // the default interval of lease checkpoint
+ defaultLeaseCheckpointInterval = 5 * time.Minute
+
+ // maximum number of lease checkpoints to batch into a single consensus log entry
+ maxLeaseCheckpointBatchSize = 1000
+
+ // the default interval at which to recheck whether an expired lease has been revoked
+ defaultExpiredleaseRetryInterval = 3 * time.Second
+
+ ErrNotPrimary = errors.New("not a primary lessor")
+ ErrLeaseNotFound = errors.New("lease not found")
+ ErrLeaseExists = errors.New("lease already exists")
+ ErrLeaseTTLTooLarge = errors.New("too large lease TTL")
+)
+
+// TxnDelete is a TxnWrite that only permits deletes. Defined here
+// to avoid circular dependency with mvcc.
+type TxnDelete interface {
+ DeleteRange(key, end []byte) (n, rev int64)
+ End()
+}
+
+// RangeDeleter is a TxnDelete constructor.
+type RangeDeleter func() TxnDelete
+
+// Checkpointer permits checkpointing of lease remaining TTLs to the consensus log. Defined here to
+// avoid circular dependency with mvcc.
+type Checkpointer func(ctx context.Context, lc *pb.LeaseCheckpointRequest)
+
+type LeaseID int64
+
+// Lessor owns leases. It can grant, revoke, renew and modify leases for lessees.
+type Lessor interface {
+ // SetRangeDeleter lets the lessor create TxnDeletes to the store.
+ // Lessor deletes the items in the revoked or expired lease by creating
+ // new TxnDeletes.
+ SetRangeDeleter(rd RangeDeleter)
+
+ SetCheckpointer(cp Checkpointer)
+
+ // Grant grants a lease that expires at least after TTL seconds.
+ Grant(id LeaseID, ttl int64) (*Lease, error)
+ // Revoke revokes a lease with given ID. The item attached to the
+ // given lease will be removed. If the ID does not exist, an error
+ // will be returned.
+ Revoke(id LeaseID) error
+
+ // Checkpoint applies the remainingTTL of a lease. The remainingTTL is used in Promote to set
+ // the expiry of leases to less than the full TTL when possible.
+ Checkpoint(id LeaseID, remainingTTL int64) error
+
+ // Attach attaches given leaseItem to the lease with given LeaseID.
+ // If the lease does not exist, an error will be returned.
+ Attach(id LeaseID, items []LeaseItem) error
+
+ // GetLease returns LeaseID for given item.
+ // If no lease found, NoLease value will be returned.
+ GetLease(item LeaseItem) LeaseID
+
+ // Detach detaches given leaseItem from the lease with given LeaseID.
+ // If the lease does not exist, an error will be returned.
+ Detach(id LeaseID, items []LeaseItem) error
+
+ // Promote promotes the lessor to be the primary lessor. The primary lessor
+ // manages the expiration and renewal of leases.
+ // A newly promoted lessor renews the TTL of all leases to extend + previous TTL.
+ Promote(extend time.Duration)
+
+ // Demote demotes the lessor from being the primary lessor.
+ Demote()
+
+ // Renew renews a lease with given ID. It returns the renewed TTL. If the ID does not exist,
+ // an error will be returned.
+ Renew(id LeaseID) (int64, error)
+
+ // Lookup gives the lease at a given lease id, if any
+ Lookup(id LeaseID) *Lease
+
+ // Leases lists all leases.
+ Leases() []*Lease
+
+ // ExpiredLeasesC returns a chan that is used to receive expired leases.
+ ExpiredLeasesC() <-chan []*Lease
+
+ // Recover recovers the lessor state from the given backend and RangeDeleter.
+ Recover(b backend.Backend, rd RangeDeleter)
+
+ // Stop stops the lessor for managing leases. The behavior of calling Stop multiple
+ // times is undefined.
+ Stop()
+}
+
+// lessor implements Lessor interface.
+// TODO: use clockwork for testability.
+type lessor struct {
+ mu sync.RWMutex
+
+ // demotec is set when the lessor is the primary.
+ // demotec will be closed if the lessor is demoted.
+ demotec chan struct{}
+
+ leaseMap map[LeaseID]*Lease
+ leaseExpiredNotifier *LeaseExpiredNotifier
+ leaseCheckpointHeap LeaseQueue
+ itemMap map[LeaseItem]LeaseID
+
+ // When a lease expires, the lessor will delete the
+ // leased range (or key) by the RangeDeleter.
+ rd RangeDeleter
+
+ // When a lease's deadline should be persisted to preserve the remaining TTL across leader
+ // elections and restarts, the lessor will checkpoint the lease by the Checkpointer.
+ cp Checkpointer
+
+ // backend to persist leases. We only persist lease ID and expiry for now.
+ // The leased items can be recovered by iterating all the keys in kv.
+ b backend.Backend
+
+ // minLeaseTTL is the minimum lease TTL that can be granted for a lease. Any
+ // requests for shorter TTLs are extended to the minimum TTL.
+ minLeaseTTL int64
+
+ expiredC chan []*Lease
+ // stopC is a channel whose closure indicates that the lessor should be stopped.
+ stopC chan struct{}
+ // doneC is a channel whose closure indicates that the lessor is stopped.
+ doneC chan struct{}
+
+ lg *zap.Logger
+
+ // Wait duration between lease checkpoints.
+ checkpointInterval time.Duration
+ // the interval to check if the expired lease is revoked
+ expiredLeaseRetryInterval time.Duration
+}
+
+type LessorConfig struct {
+ MinLeaseTTL int64
+ CheckpointInterval time.Duration
+ ExpiredLeasesRetryInterval time.Duration
+}
+
+func NewLessor(lg *zap.Logger, b backend.Backend, cfg LessorConfig) Lessor {
+ return newLessor(lg, b, cfg)
+}
+
+func newLessor(lg *zap.Logger, b backend.Backend, cfg LessorConfig) *lessor {
+ checkpointInterval := cfg.CheckpointInterval
+ expiredLeaseRetryInterval := cfg.ExpiredLeasesRetryInterval
+ if checkpointInterval == 0 {
+ checkpointInterval = defaultLeaseCheckpointInterval
+ }
+ if expiredLeaseRetryInterval == 0 {
+ expiredLeaseRetryInterval = defaultExpiredleaseRetryInterval
+ }
+ l := &lessor{
+ leaseMap: make(map[LeaseID]*Lease),
+ itemMap: make(map[LeaseItem]LeaseID),
+ leaseExpiredNotifier: newLeaseExpiredNotifier(),
+ leaseCheckpointHeap: make(LeaseQueue, 0),
+ b: b,
+ minLeaseTTL: cfg.MinLeaseTTL,
+ checkpointInterval: checkpointInterval,
+ expiredLeaseRetryInterval: expiredLeaseRetryInterval,
+ // expiredC is a small buffered chan to avoid unnecessary blocking.
+ expiredC: make(chan []*Lease, 16),
+ stopC: make(chan struct{}),
+ doneC: make(chan struct{}),
+ lg: lg,
+ }
+ l.initAndRecover()
+
+ go l.runLoop()
+
+ return l
+}
+
+// isPrimary indicates whether this lessor is the primary lessor. The primary
+// lessor manages lease expiration and renewal.
+//
+// In etcd, the raft leader is the primary. Thus there might be two primary
+// lessors at the same time (raft allows concurrent leaders, but with different terms)
+// for at most a leader election timeout.
+// The old primary leader cannot affect correctness since its proposals have a
+// smaller term and will not be committed.
+//
+// TODO: raft followers do not forward lease management proposals. There might be a
+// very small window (normally within a second, depending on Go scheduling) in which
+// a raft follower is the primary between the raft leader demotion and the lessor demotion.
+// Usually this should not be a problem; leases should not be that sensitive to timing.
+func (le *lessor) isPrimary() bool {
+ return le.demotec != nil
+}
+
+func (le *lessor) SetRangeDeleter(rd RangeDeleter) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ le.rd = rd
+}
+
+func (le *lessor) SetCheckpointer(cp Checkpointer) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ le.cp = cp
+}
+
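+// Grant creates a lease with the given ID and TTL, enforcing the minimum
+// TTL and persisting the lease to the backend. When this lessor is the
+// primary, it also registers the expiry with the expired-lease notifier and
+// schedules a checkpoint if needed.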
+func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
+ if id == NoLease {
+ return nil, ErrLeaseNotFound
+ }
+
+ if ttl > MaxLeaseTTL {
+ return nil, ErrLeaseTTLTooLarge
+ }
+
+ // TODO: when lessor is under high load, it should give out lease
+ // with longer TTL to reduce renew load.
+ l := &Lease{
+ ID: id,
+ ttl: ttl,
+ itemSet: make(map[LeaseItem]struct{}),
+ revokec: make(chan struct{}),
+ }
+
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ if _, ok := le.leaseMap[id]; ok {
+ return nil, ErrLeaseExists
+ }
+
+ if l.ttl < le.minLeaseTTL {
+ l.ttl = le.minLeaseTTL
+ }
+
+ if le.isPrimary() {
+ l.refresh(0)
+ } else {
+ l.forever()
+ }
+
+ le.leaseMap[id] = l
+ l.persistTo(le.b)
+
+ leaseTotalTTLs.Observe(float64(l.ttl))
+ leaseGranted.Inc()
+
+ if le.isPrimary() {
+ item := &LeaseWithTime{id: l.ID, time: l.expiry.UnixNano()}
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ le.scheduleCheckpointIfNeeded(l)
+ }
+
+ return l, nil
+}
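+
+// A hedged usage sketch for Grant: a requested TTL below minLeaseTTL is
+// clamped up, so the returned lease reports the minimum. LeaseID 1 and the
+// TTLs are arbitrary example values.
+//
+//	l, err := le.Grant(LeaseID(1), 1) // lessor configured with MinLeaseTTL = 5
+//	if err == nil {
+//		fmt.Println(l.TTL()) // 5, not 1
+//	}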
+
+func (le *lessor) Revoke(id LeaseID) error {
+ le.mu.Lock()
+
+ l := le.leaseMap[id]
+ if l == nil {
+ le.mu.Unlock()
+ return ErrLeaseNotFound
+ }
+ defer close(l.revokec)
+ // unlock before doing external work
+ le.mu.Unlock()
+
+ if le.rd == nil {
+ return nil
+ }
+
+ txn := le.rd()
+
+	// sort keys so deletes are in the same order among all members;
+	// otherwise the backend hashes will differ
+ keys := l.Keys()
+ sort.StringSlice(keys).Sort()
+ for _, key := range keys {
+ txn.DeleteRange([]byte(key), nil)
+ }
+
+ le.mu.Lock()
+ defer le.mu.Unlock()
+ delete(le.leaseMap, l.ID)
+	// lease deletion needs to be in the same backend transaction as the
+	// kv deletion; otherwise we might end up not executing the revoke, or not
+	// deleting the keys, if etcdserver fails in between.
+ le.b.BatchTx().UnsafeDelete(leaseBucketName, int64ToBytes(int64(l.ID)))
+
+ txn.End()
+
+ leaseRevoked.Inc()
+ return nil
+}
+
+func (le *lessor) Checkpoint(id LeaseID, remainingTTL int64) error {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ if l, ok := le.leaseMap[id]; ok {
+		// when checkpointing, we only update the remainingTTL; Promote is responsible for applying this to lease expiry
+ l.remainingTTL = remainingTTL
+ if le.isPrimary() {
+ // schedule the next checkpoint as needed
+ le.scheduleCheckpointIfNeeded(l)
+ }
+ }
+ return nil
+}
+
+// Renew renews an existing lease. If the given lease does not exist or
+// has expired, an error will be returned.
+func (le *lessor) Renew(id LeaseID) (int64, error) {
+ le.mu.RLock()
+ if !le.isPrimary() {
+ // forward renew request to primary instead of returning error.
+ le.mu.RUnlock()
+ return -1, ErrNotPrimary
+ }
+
+ demotec := le.demotec
+
+ l := le.leaseMap[id]
+ if l == nil {
+ le.mu.RUnlock()
+ return -1, ErrLeaseNotFound
+ }
+ // Clear remaining TTL when we renew if it is set
+ clearRemainingTTL := le.cp != nil && l.remainingTTL > 0
+
+ le.mu.RUnlock()
+ if l.expired() {
+ select {
+		// An expired lease might be pending revocation or going through
+		// quorum to be revoked. To be accurate, the renew request must wait for the
+		// deletion to complete.
+ case <-l.revokec:
+ return -1, ErrLeaseNotFound
+ // The expired lease might fail to be revoked if the primary changes.
+ // The caller will retry on ErrNotPrimary.
+ case <-demotec:
+ return -1, ErrNotPrimary
+ case <-le.stopC:
+ return -1, ErrNotPrimary
+ }
+ }
+
+ // Clear remaining TTL when we renew if it is set
+ // By applying a RAFT entry only when the remainingTTL is already set, we limit the number
+ // of RAFT entries written per lease to a max of 2 per checkpoint interval.
+ if clearRemainingTTL {
+ le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: []*pb.LeaseCheckpoint{{ID: int64(l.ID), Remaining_TTL: 0}}})
+ }
+
+ le.mu.Lock()
+ l.refresh(0)
+ item := &LeaseWithTime{id: l.ID, time: l.expiry.UnixNano()}
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ le.mu.Unlock()
+
+ leaseRenewed.Inc()
+ return l.ttl, nil
+}
+
+func (le *lessor) Lookup(id LeaseID) *Lease {
+ le.mu.RLock()
+ defer le.mu.RUnlock()
+ return le.leaseMap[id]
+}
+
+func (le *lessor) unsafeLeases() []*Lease {
+ leases := make([]*Lease, 0, len(le.leaseMap))
+ for _, l := range le.leaseMap {
+ leases = append(leases, l)
+ }
+ return leases
+}
+
+func (le *lessor) Leases() []*Lease {
+ le.mu.RLock()
+ ls := le.unsafeLeases()
+ le.mu.RUnlock()
+ sort.Sort(leasesByExpiry(ls))
+ return ls
+}
+
+func (le *lessor) Promote(extend time.Duration) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ le.demotec = make(chan struct{})
+
+ // refresh the expiries of all leases.
+ for _, l := range le.leaseMap {
+ l.refresh(extend)
+ item := &LeaseWithTime{id: l.ID, time: l.expiry.UnixNano()}
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ }
+
+ if len(le.leaseMap) < leaseRevokeRate {
+ // no possibility of lease pile-up
+ return
+ }
+
+ // adjust expiries in case of overlap
+ leases := le.unsafeLeases()
+ sort.Sort(leasesByExpiry(leases))
+
+ baseWindow := leases[0].Remaining()
+ nextWindow := baseWindow + time.Second
+ expires := 0
+	// keep expirations below the total revoke rate so piled-up leases
+	// don't consume the entire revoke limit
+ targetExpiresPerSecond := (3 * leaseRevokeRate) / 4
+ for _, l := range leases {
+ remaining := l.Remaining()
+ if remaining > nextWindow {
+ baseWindow = remaining
+ nextWindow = baseWindow + time.Second
+ expires = 1
+ continue
+ }
+ expires++
+ if expires <= targetExpiresPerSecond {
+ continue
+ }
+ rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond))
+ // If leases are extended by n seconds, leases n seconds ahead of the
+ // base window should be extended by only one second.
+ rateDelay -= float64(remaining - baseWindow)
+ delay := time.Duration(rateDelay)
+ nextWindow = baseWindow + delay
+ l.refresh(delay + extend)
+ item := &LeaseWithTime{id: l.ID, time: l.expiry.UnixNano()}
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ le.scheduleCheckpointIfNeeded(l)
+ }
+}
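+
+// A worked example of the smoothing above (illustrative; assumes the upstream
+// default leaseRevokeRate of 1000, so targetExpiresPerSecond = 750): if 3000
+// leases land in the same one-second window after Promote, the first 750 keep
+// their refreshed expiry. The 1500th gets rateDelay = 1s * (1500/750) = 2s,
+// minus its offset from the base window, so expirations are spread out at
+// roughly 750 per second instead of all at once, leaving headroom under the
+// revoke limit for ordinary expirations.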
+
+type leasesByExpiry []*Lease
+
+func (le leasesByExpiry) Len() int { return len(le) }
+func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() }
+func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] }
+
+func (le *lessor) Demote() {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ // set the expiries of all leases to forever
+ for _, l := range le.leaseMap {
+ l.forever()
+ }
+
+ le.clearScheduledLeasesCheckpoints()
+ le.clearLeaseExpiredNotifier()
+
+ if le.demotec != nil {
+ close(le.demotec)
+ le.demotec = nil
+ }
+}
+
+// Attach attaches items to the lease with given ID. When the lease
+// expires, the attached items will be automatically removed.
+// If the given lease does not exist, an error will be returned.
+func (le *lessor) Attach(id LeaseID, items []LeaseItem) error {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ l := le.leaseMap[id]
+ if l == nil {
+ return ErrLeaseNotFound
+ }
+
+ l.mu.Lock()
+ for _, it := range items {
+ l.itemSet[it] = struct{}{}
+ le.itemMap[it] = id
+ }
+ l.mu.Unlock()
+ return nil
+}
+
+func (le *lessor) GetLease(item LeaseItem) LeaseID {
+ le.mu.RLock()
+ id := le.itemMap[item]
+ le.mu.RUnlock()
+ return id
+}
+
+// Detach detaches items from the lease with given ID.
+// If the given lease does not exist, an error will be returned.
+func (le *lessor) Detach(id LeaseID, items []LeaseItem) error {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ l := le.leaseMap[id]
+ if l == nil {
+ return ErrLeaseNotFound
+ }
+
+ l.mu.Lock()
+ for _, it := range items {
+ delete(l.itemSet, it)
+ delete(le.itemMap, it)
+ }
+ l.mu.Unlock()
+ return nil
+}
+
+func (le *lessor) Recover(b backend.Backend, rd RangeDeleter) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ le.b = b
+ le.rd = rd
+ le.leaseMap = make(map[LeaseID]*Lease)
+ le.itemMap = make(map[LeaseItem]LeaseID)
+ le.initAndRecover()
+}
+
+func (le *lessor) ExpiredLeasesC() <-chan []*Lease {
+ return le.expiredC
+}
+
+func (le *lessor) Stop() {
+ close(le.stopC)
+ <-le.doneC
+}
+
+func (le *lessor) runLoop() {
+ defer close(le.doneC)
+
+ for {
+ le.revokeExpiredLeases()
+ le.checkpointScheduledLeases()
+
+ select {
+ case <-time.After(500 * time.Millisecond):
+ case <-le.stopC:
+ return
+ }
+ }
+}
+
+// revokeExpiredLeases finds all leases past their expiry and sends them to the
+// expired channel to be revoked.
+func (le *lessor) revokeExpiredLeases() {
+ var ls []*Lease
+
+ // rate limit
+ revokeLimit := leaseRevokeRate / 2
+
+ le.mu.RLock()
+ if le.isPrimary() {
+ ls = le.findExpiredLeases(revokeLimit)
+ }
+ le.mu.RUnlock()
+
+ if len(ls) != 0 {
+ select {
+ case <-le.stopC:
+ return
+ case le.expiredC <- ls:
+ default:
+			// the receiver of expiredC is probably busy handling
+			// other work; retry on the next 500ms tick
+ }
+ }
+}
+
+// checkpointScheduledLeases finds all scheduled lease checkpoints that are due and
+// submits them to the checkpointer to persist them to the consensus log.
+func (le *lessor) checkpointScheduledLeases() {
+ var cps []*pb.LeaseCheckpoint
+
+ // rate limit
+ for i := 0; i < leaseCheckpointRate/2; i++ {
+ le.mu.Lock()
+ if le.isPrimary() {
+ cps = le.findDueScheduledCheckpoints(maxLeaseCheckpointBatchSize)
+ }
+ le.mu.Unlock()
+
+ if len(cps) != 0 {
+ le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: cps})
+ }
+ if len(cps) < maxLeaseCheckpointBatchSize {
+ return
+ }
+ }
+}
+
+func (le *lessor) clearScheduledLeasesCheckpoints() {
+ le.leaseCheckpointHeap = make(LeaseQueue, 0)
+}
+
+func (le *lessor) clearLeaseExpiredNotifier() {
+ le.leaseExpiredNotifier = newLeaseExpiredNotifier()
+}
+
+// expireExists returns the lease at the head of the queue if it has expired.
+// "ok" is true only when an expired lease was found.
+// "next" is true to indicate that the next attempt may find one.
+func (le *lessor) expireExists() (l *Lease, ok bool, next bool) {
+ if le.leaseExpiredNotifier.Len() == 0 {
+ return nil, false, false
+ }
+
+ item := le.leaseExpiredNotifier.Poll()
+ l = le.leaseMap[item.id]
+ if l == nil {
+		// lease is no longer in the map (already revoked);
+		// nothing to revoke here
+ le.leaseExpiredNotifier.Unregister() // O(log N)
+ return nil, false, true
+ }
+ now := time.Now()
+ if now.UnixNano() < item.time /* expiration time */ {
+		// Candidate expirations are caught up; nothing has
+		// expired yet, so there is nothing to revoke
+ return l, false, false
+ }
+
+	// reschedule the item so that revocation is rechecked after the retry interval
+ item.time = now.Add(le.expiredLeaseRetryInterval).UnixNano()
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ return l, true, false
+}
+
+// findExpiredLeases loops over leases in the leaseMap until reaching the expired
+// limit and returns the expired leases that need to be revoked.
+func (le *lessor) findExpiredLeases(limit int) []*Lease {
+ leases := make([]*Lease, 0, 16)
+
+ for {
+ l, ok, next := le.expireExists()
+ if !ok && !next {
+ break
+ }
+ if !ok {
+ continue
+ }
+ if next {
+ continue
+ }
+
+ if l.expired() {
+ leases = append(leases, l)
+
+ // reach expired limit
+ if len(leases) == limit {
+ break
+ }
+ }
+ }
+
+ return leases
+}
+
+func (le *lessor) scheduleCheckpointIfNeeded(lease *Lease) {
+ if le.cp == nil {
+ return
+ }
+
+ if lease.RemainingTTL() > int64(le.checkpointInterval.Seconds()) {
+ if le.lg != nil {
+ le.lg.Debug("Scheduling lease checkpoint",
+ zap.Int64("leaseID", int64(lease.ID)),
+ zap.Duration("intervalSeconds", le.checkpointInterval),
+ )
+ }
+ heap.Push(&le.leaseCheckpointHeap, &LeaseWithTime{
+ id: lease.ID,
+ time: time.Now().Add(le.checkpointInterval).UnixNano(),
+ })
+ }
+}
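+
+// An illustrative scenario (assuming the upstream default checkpoint interval
+// of 5 minutes): a lease granted with a 30-minute TTL is checkpointed roughly
+// every 5 minutes, persisting the remaining TTL, so a newly promoted leader
+// resumes the lease at ~25m, ~20m, ... instead of resetting it to the full 30m.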
+
+func (le *lessor) findDueScheduledCheckpoints(checkpointLimit int) []*pb.LeaseCheckpoint {
+ if le.cp == nil {
+ return nil
+ }
+
+ now := time.Now()
+ cps := []*pb.LeaseCheckpoint{}
+ for le.leaseCheckpointHeap.Len() > 0 && len(cps) < checkpointLimit {
+ lt := le.leaseCheckpointHeap[0]
+ if lt.time /* next checkpoint time */ > now.UnixNano() {
+ return cps
+ }
+ heap.Pop(&le.leaseCheckpointHeap)
+ var l *Lease
+ var ok bool
+ if l, ok = le.leaseMap[lt.id]; !ok {
+ continue
+ }
+ if !now.Before(l.expiry) {
+ continue
+ }
+ remainingTTL := int64(math.Ceil(l.expiry.Sub(now).Seconds()))
+ if remainingTTL >= l.ttl {
+ continue
+ }
+ if le.lg != nil {
+ le.lg.Debug("Checkpointing lease",
+ zap.Int64("leaseID", int64(lt.id)),
+ zap.Int64("remainingTTL", remainingTTL),
+ )
+ }
+ cps = append(cps, &pb.LeaseCheckpoint{ID: int64(lt.id), Remaining_TTL: remainingTTL})
+ }
+ return cps
+}
+
+func (le *lessor) initAndRecover() {
+ tx := le.b.BatchTx()
+ tx.Lock()
+
+ tx.UnsafeCreateBucket(leaseBucketName)
+ _, vs := tx.UnsafeRange(leaseBucketName, int64ToBytes(0), int64ToBytes(math.MaxInt64), 0)
+ // TODO: copy vs and do decoding outside tx lock if lock contention becomes an issue.
+ for i := range vs {
+ var lpb leasepb.Lease
+ err := lpb.Unmarshal(vs[i])
+ if err != nil {
+ tx.Unlock()
+ panic("failed to unmarshal lease proto item")
+ }
+ ID := LeaseID(lpb.ID)
+ if lpb.TTL < le.minLeaseTTL {
+ lpb.TTL = le.minLeaseTTL
+ }
+ le.leaseMap[ID] = &Lease{
+ ID: ID,
+ ttl: lpb.TTL,
+			// itemSet will be filled in when recovering key-value pairs.
+			// set expiry to forever; refresh when promoted
+ itemSet: make(map[LeaseItem]struct{}),
+ expiry: forever,
+ revokec: make(chan struct{}),
+ }
+ }
+ le.leaseExpiredNotifier.Init()
+ heap.Init(&le.leaseCheckpointHeap)
+ tx.Unlock()
+
+ le.b.ForceCommit()
+}
+
+type Lease struct {
+ ID LeaseID
+ ttl int64 // time to live of the lease in seconds
+ remainingTTL int64 // remaining time to live in seconds, if zero valued it is considered unset and the full ttl should be used
+ // expiryMu protects concurrent accesses to expiry
+ expiryMu sync.RWMutex
+ // expiry is time when lease should expire. no expiration when expiry.IsZero() is true
+ expiry time.Time
+
+ // mu protects concurrent accesses to itemSet
+ mu sync.RWMutex
+ itemSet map[LeaseItem]struct{}
+ revokec chan struct{}
+}
+
+func (l *Lease) expired() bool {
+ return l.Remaining() <= 0
+}
+
+func (l *Lease) persistTo(b backend.Backend) {
+ key := int64ToBytes(int64(l.ID))
+
+ lpb := leasepb.Lease{ID: int64(l.ID), TTL: l.ttl, RemainingTTL: l.remainingTTL}
+ val, err := lpb.Marshal()
+ if err != nil {
+ panic("failed to marshal lease proto item")
+ }
+
+ b.BatchTx().Lock()
+ b.BatchTx().UnsafePut(leaseBucketName, key, val)
+ b.BatchTx().Unlock()
+}
+
+// TTL returns the TTL of the Lease.
+func (l *Lease) TTL() int64 {
+ return l.ttl
+}
+
+// RemainingTTL returns the last checkpointed remaining TTL of the lease.
+// TODO(jpbetz): do not expose this utility method
+func (l *Lease) RemainingTTL() int64 {
+ if l.remainingTTL > 0 {
+ return l.remainingTTL
+ }
+ return l.ttl
+}
+
+// refresh refreshes the expiry of the lease.
+func (l *Lease) refresh(extend time.Duration) {
+ newExpiry := time.Now().Add(extend + time.Duration(l.RemainingTTL())*time.Second)
+ l.expiryMu.Lock()
+ defer l.expiryMu.Unlock()
+ l.expiry = newExpiry
+}
+
+// forever sets the expiry of lease to be forever.
+func (l *Lease) forever() {
+ l.expiryMu.Lock()
+ defer l.expiryMu.Unlock()
+ l.expiry = forever
+}
+
+// Keys returns all the keys attached to the lease.
+func (l *Lease) Keys() []string {
+ l.mu.RLock()
+ keys := make([]string, 0, len(l.itemSet))
+ for k := range l.itemSet {
+ keys = append(keys, k.Key)
+ }
+ l.mu.RUnlock()
+ return keys
+}
+
+// Remaining returns the remaining time of the lease.
+func (l *Lease) Remaining() time.Duration {
+ l.expiryMu.RLock()
+ defer l.expiryMu.RUnlock()
+ if l.expiry.IsZero() {
+ return time.Duration(math.MaxInt64)
+ }
+ return time.Until(l.expiry)
+}
+
+type LeaseItem struct {
+ Key string
+}
+
+func int64ToBytes(n int64) []byte {
+ bytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(bytes, uint64(n))
+ return bytes
+}
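+
+// int64ToBytes uses big-endian encoding so that the byte-wise key order in the
+// lease bucket matches numeric lease-ID order, which is what makes the
+// int64ToBytes(0)..int64ToBytes(math.MaxInt64) range scan in initAndRecover
+// work. For example:
+//
+//	int64ToBytes(1)   // -> [0 0 0 0 0 0 0 1]
+//	int64ToBytes(256) // -> [0 0 0 0 0 0 1 0]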
+
+// FakeLessor is a fake implementation of Lessor interface.
+// Used for testing only.
+type FakeLessor struct{}
+
+func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {}
+
+func (fl *FakeLessor) SetCheckpointer(cp Checkpointer) {}
+
+func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { return nil, nil }
+
+func (fl *FakeLessor) Revoke(id LeaseID) error { return nil }
+
+func (fl *FakeLessor) Checkpoint(id LeaseID, remainingTTL int64) error { return nil }
+
+func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil }
+
+func (fl *FakeLessor) GetLease(item LeaseItem) LeaseID { return 0 }
+func (fl *FakeLessor) Detach(id LeaseID, items []LeaseItem) error { return nil }
+
+func (fl *FakeLessor) Promote(extend time.Duration) {}
+
+func (fl *FakeLessor) Demote() {}
+
+func (fl *FakeLessor) Renew(id LeaseID) (int64, error) { return 10, nil }
+
+func (fl *FakeLessor) Lookup(id LeaseID) *Lease { return nil }
+
+func (fl *FakeLessor) Leases() []*Lease { return nil }
+
+func (fl *FakeLessor) ExpiredLeasesC() <-chan []*Lease { return nil }
+
+func (fl *FakeLessor) Recover(b backend.Backend, rd RangeDeleter) {}
+
+func (fl *FakeLessor) Stop() {}
+
+type FakeTxnDelete struct {
+ backend.BatchTx
+}
+
+func (ftd *FakeTxnDelete) DeleteRange(key, end []byte) (n, rev int64) { return 0, 0 }
+func (ftd *FakeTxnDelete) End() { ftd.Unlock() }
diff --git a/vendor/go.etcd.io/etcd/lease/metrics.go b/vendor/go.etcd.io/etcd/lease/metrics.go
new file mode 100644
index 000000000000..06f8b58015f9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/lease/metrics.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lease
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ leaseGranted = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "lease",
+ Name: "granted_total",
+ Help: "The total number of granted leases.",
+ })
+
+ leaseRevoked = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "lease",
+ Name: "revoked_total",
+ Help: "The total number of revoked leases.",
+ })
+
+ leaseRenewed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "lease",
+ Name: "renewed_total",
+ Help: "The number of renewed leases seen by the leader.",
+ })
+
+ leaseTotalTTLs = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "lease",
+ Name: "ttl_total",
+ Help: "Bucketed histogram of lease TTLs.",
+ // 1 second -> 3 months
+ Buckets: prometheus.ExponentialBuckets(1, 2, 24),
+ })
+)
+
+func init() {
+ prometheus.MustRegister(leaseGranted)
+ prometheus.MustRegister(leaseRevoked)
+ prometheus.MustRegister(leaseRenewed)
+ prometheus.MustRegister(leaseTotalTTLs)
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/backend.go b/vendor/go.etcd.io/etcd/mvcc/backend/backend.go
new file mode 100644
index 000000000000..26f196fbff3a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/backend.go
@@ -0,0 +1,591 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "fmt"
+ "hash/crc32"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/coreos/pkg/capnslog"
+ humanize "github.com/dustin/go-humanize"
+ bolt "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+var (
+ defaultBatchLimit = 10000
+ defaultBatchInterval = 100 * time.Millisecond
+
+ defragLimit = 10000
+
+	// initialMmapSize is the initial size of the mmapped region. Setting this larger than
+	// the potential max db size can prevent the writer from blocking readers.
+	// This only works on Linux.
+ initialMmapSize = uint64(10 * 1024 * 1024 * 1024)
+
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "mvcc/backend")
+
+ // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning.
+ minSnapshotWarningTimeout = 30 * time.Second
+)
+
+type Backend interface {
+ // ReadTx returns a read transaction. It is replaced by ConcurrentReadTx in the main data path, see #10523.
+ ReadTx() ReadTx
+ BatchTx() BatchTx
+ // ConcurrentReadTx returns a non-blocking read transaction.
+ ConcurrentReadTx() ReadTx
+
+ Snapshot() Snapshot
+ Hash(ignores map[IgnoreKey]struct{}) (uint32, error)
+ // Size returns the current size of the backend physically allocated.
+ // The backend can hold DB space that is not utilized at the moment,
+ // since it can conduct pre-allocation or spare unused space for recycling.
+ // Use SizeInUse() instead for the actual DB size.
+ Size() int64
+ // SizeInUse returns the current size of the backend logically in use.
+ // Since the backend can manage free space in a non-byte unit such as
+	// number of pages, the returned value may not be exactly accurate in bytes.
+ SizeInUse() int64
+ // OpenReadTxN returns the number of currently open read transactions in the backend.
+ OpenReadTxN() int64
+ Defrag() error
+ ForceCommit()
+ Close() error
+}
+
+type Snapshot interface {
+ // Size gets the size of the snapshot.
+ Size() int64
+ // WriteTo writes the snapshot into the given writer.
+ WriteTo(w io.Writer) (n int64, err error)
+ // Close closes the snapshot.
+ Close() error
+}
+
+type backend struct {
+ // size and commits are used with atomic operations so they must be
+ // 64-bit aligned, otherwise 32-bit tests will crash
+
+ // size is the number of bytes allocated in the backend
+ size int64
+ // sizeInUse is the number of bytes actually used in the backend
+ sizeInUse int64
+ // commits counts number of commits since start
+ commits int64
+ // openReadTxN is the number of currently open read transactions in the backend
+ openReadTxN int64
+
+ mu sync.RWMutex
+ db *bolt.DB
+
+ batchInterval time.Duration
+ batchLimit int
+ batchTx *batchTxBuffered
+
+ readTx *readTx
+
+ stopc chan struct{}
+ donec chan struct{}
+
+ lg *zap.Logger
+}
+
+type BackendConfig struct {
+ // Path is the file path to the backend file.
+ Path string
+ // BatchInterval is the maximum time before flushing the BatchTx.
+ BatchInterval time.Duration
+ // BatchLimit is the maximum puts before flushing the BatchTx.
+ BatchLimit int
+ // BackendFreelistType is the backend boltdb's freelist type.
+ BackendFreelistType bolt.FreelistType
+ // MmapSize is the number of bytes to mmap for the backend.
+ MmapSize uint64
+ // Logger logs backend-side operations.
+ Logger *zap.Logger
+}
+
+func DefaultBackendConfig() BackendConfig {
+ return BackendConfig{
+ BatchInterval: defaultBatchInterval,
+ BatchLimit: defaultBatchLimit,
+ MmapSize: initialMmapSize,
+ }
+}
+
+func New(bcfg BackendConfig) Backend {
+ return newBackend(bcfg)
+}
+
+func NewDefaultBackend(path string) Backend {
+ bcfg := DefaultBackendConfig()
+ bcfg.Path = path
+ return newBackend(bcfg)
+}
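+
+// An illustrative sketch of opening a backend with a non-default batch
+// interval; the path is a hypothetical example value.
+//
+//	bcfg := DefaultBackendConfig()
+//	bcfg.Path = "/var/lib/example/db"
+//	bcfg.BatchInterval = 50 * time.Millisecond // flush pending writes more often
+//	b := New(bcfg)
+//	defer b.Close()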
+
+func newBackend(bcfg BackendConfig) *backend {
+ bopts := &bolt.Options{}
+ if boltOpenOptions != nil {
+ *bopts = *boltOpenOptions
+ }
+ bopts.InitialMmapSize = bcfg.mmapSize()
+ bopts.FreelistType = bcfg.BackendFreelistType
+
+ db, err := bolt.Open(bcfg.Path, 0600, bopts)
+ if err != nil {
+ if bcfg.Logger != nil {
+ bcfg.Logger.Panic("failed to open database", zap.String("path", bcfg.Path), zap.Error(err))
+ } else {
+ plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err)
+ }
+ }
+
+	// In the future, we may want to make buffering optional for low-concurrency systems
+	// or dynamically swap between buffered/non-buffered depending on workload.
+ b := &backend{
+ db: db,
+
+ batchInterval: bcfg.BatchInterval,
+ batchLimit: bcfg.BatchLimit,
+
+ readTx: &readTx{
+ buf: txReadBuffer{
+ txBuffer: txBuffer{make(map[string]*bucketBuffer)},
+ },
+ buckets: make(map[string]*bolt.Bucket),
+ txWg: new(sync.WaitGroup),
+ },
+
+ stopc: make(chan struct{}),
+ donec: make(chan struct{}),
+
+ lg: bcfg.Logger,
+ }
+ b.batchTx = newBatchTxBuffered(b)
+ go b.run()
+ return b
+}
+
+// BatchTx returns the current batch tx in the coalescer. The tx can be used for read and
+// write operations. The write result can be retrieved within the same tx immediately.
+// The write result is isolated from other txs until the current one gets committed.
+func (b *backend) BatchTx() BatchTx {
+ return b.batchTx
+}
+
+func (b *backend) ReadTx() ReadTx { return b.readTx }
+
+// ConcurrentReadTx creates and returns a new ReadTx, which:
+// A) creates and keeps a copy of backend.readTx.txReadBuffer,
+// B) references the boltdb read Tx (and its bucket cache) of current batch interval.
+func (b *backend) ConcurrentReadTx() ReadTx {
+ b.readTx.RLock()
+ defer b.readTx.RUnlock()
+	// prevent the boltdb read Tx from being rolled back until the store read Tx is done. Needs to be called while holding readTx.RLock().
+ b.readTx.txWg.Add(1)
+ // TODO: might want to copy the read buffer lazily - create copy when A) end of a write transaction B) end of a batch interval.
+ return &concurrentReadTx{
+ buf: b.readTx.buf.unsafeCopy(),
+ tx: b.readTx.tx,
+ txMu: &b.readTx.txMu,
+ buckets: b.readTx.buckets,
+ txWg: b.readTx.txWg,
+ }
+}
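+
+// A hedged usage sketch for ConcurrentReadTx: RUnlock must be called when the
+// read is done so the underlying boltdb tx can be rolled back at the end of
+// the batch interval. start and end are hypothetical key bounds; only the
+// "key" bucket is safe for ranged reads (see safeRangeBucket in read_tx.go).
+//
+//	tx := b.ConcurrentReadTx()
+//	tx.RLock() // no-op; kept for ReadTx interface symmetry
+//	keys, vals := tx.UnsafeRange([]byte("key"), start, end, 0)
+//	tx.RUnlock() // signals that this read no longer needs the boltdb tx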
+
+// ForceCommit forces the current batching tx to commit.
+func (b *backend) ForceCommit() {
+ b.batchTx.Commit()
+}
+
+func (b *backend) Snapshot() Snapshot {
+ b.batchTx.Commit()
+
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ tx, err := b.db.Begin(false)
+ if err != nil {
+ if b.lg != nil {
+ b.lg.Fatal("failed to begin tx", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot begin tx (%s)", err)
+ }
+ }
+
+ stopc, donec := make(chan struct{}), make(chan struct{})
+ dbBytes := tx.Size()
+ go func() {
+ defer close(donec)
+		// sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection,
+		// assuming a minimum TCP throughput of 100MB/s.
+		var sendRateBytes int64 = 100 * 1024 * 1024
+ warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second)))
+ if warningTimeout < minSnapshotWarningTimeout {
+ warningTimeout = minSnapshotWarningTimeout
+ }
+ start := time.Now()
+ ticker := time.NewTicker(warningTimeout)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ if b.lg != nil {
+ b.lg.Warn(
+ "snapshotting taking too long to transfer",
+ zap.Duration("taking", time.Since(start)),
+ zap.Int64("bytes", dbBytes),
+ zap.String("size", humanize.Bytes(uint64(dbBytes))),
+ )
+ } else {
+					plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1024), start)
+ }
+
+ case <-stopc:
+ snapshotTransferSec.Observe(time.Since(start).Seconds())
+ return
+ }
+ }
+ }()
+
+ return &snapshot{tx, stopc, donec}
+}
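+
+// An illustrative sketch of streaming a snapshot to a file. Close stops the
+// slow-transfer warning goroutine and rolls back the underlying read tx; the
+// path is hypothetical and error handling is elided.
+//
+//	snap := b.Snapshot()
+//	defer snap.Close()
+//	f, _ := os.Create("/tmp/backend.snap")
+//	defer f.Close()
+//	if _, err := snap.WriteTo(f); err != nil {
+//		// handle the error
+//	}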
+
+type IgnoreKey struct {
+ Bucket string
+ Key string
+}
+
+func (b *backend) Hash(ignores map[IgnoreKey]struct{}) (uint32, error) {
+ h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ err := b.db.View(func(tx *bolt.Tx) error {
+ c := tx.Cursor()
+ for next, _ := c.First(); next != nil; next, _ = c.Next() {
+ b := tx.Bucket(next)
+ if b == nil {
+ return fmt.Errorf("cannot get hash of bucket %s", string(next))
+ }
+ h.Write(next)
+ b.ForEach(func(k, v []byte) error {
+ bk := IgnoreKey{Bucket: string(next), Key: string(k)}
+ if _, ok := ignores[bk]; !ok {
+ h.Write(k)
+ h.Write(v)
+ }
+ return nil
+ })
+ }
+ return nil
+ })
+
+ if err != nil {
+ return 0, err
+ }
+
+ return h.Sum32(), nil
+}
+
+func (b *backend) Size() int64 {
+ return atomic.LoadInt64(&b.size)
+}
+
+func (b *backend) SizeInUse() int64 {
+ return atomic.LoadInt64(&b.sizeInUse)
+}
+
+func (b *backend) run() {
+ defer close(b.donec)
+ t := time.NewTimer(b.batchInterval)
+ defer t.Stop()
+ for {
+ select {
+ case <-t.C:
+ case <-b.stopc:
+ b.batchTx.CommitAndStop()
+ return
+ }
+ if b.batchTx.safePending() != 0 {
+ b.batchTx.Commit()
+ }
+ t.Reset(b.batchInterval)
+ }
+}
+
+func (b *backend) Close() error {
+ close(b.stopc)
+ <-b.donec
+ return b.db.Close()
+}
+
+// Commits returns total number of commits since start
+func (b *backend) Commits() int64 {
+ return atomic.LoadInt64(&b.commits)
+}
+
+func (b *backend) Defrag() error {
+ return b.defrag()
+}
+
+func (b *backend) defrag() error {
+ now := time.Now()
+
+ // TODO: make this non-blocking?
+ // lock batchTx to ensure nobody is using previous tx, and then
+ // close previous ongoing tx.
+ b.batchTx.Lock()
+ defer b.batchTx.Unlock()
+
+	// lock the database after locking the tx to avoid deadlock.
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ // block concurrent read requests while resetting tx
+ b.readTx.Lock()
+ defer b.readTx.Unlock()
+
+ b.batchTx.unsafeCommit(true)
+
+ b.batchTx.tx = nil
+
+ // Create a temporary file to ensure we start with a clean slate.
+ // Snapshotter.cleanupSnapdir cleans up any of these that are found during startup.
+ dir := filepath.Dir(b.db.Path())
+ temp, err := ioutil.TempFile(dir, "db.tmp.*")
+ if err != nil {
+ return err
+ }
+ options := bolt.Options{}
+ if boltOpenOptions != nil {
+ options = *boltOpenOptions
+ }
+ options.OpenFile = func(path string, i int, mode os.FileMode) (file *os.File, err error) {
+ return temp, nil
+ }
+ tdbp := temp.Name()
+ tmpdb, err := bolt.Open(tdbp, 0600, &options)
+ if err != nil {
+ return err
+ }
+
+ dbp := b.db.Path()
+ size1, sizeInUse1 := b.Size(), b.SizeInUse()
+ if b.lg != nil {
+ b.lg.Info(
+ "defragmenting",
+ zap.String("path", dbp),
+ zap.Int64("current-db-size-bytes", size1),
+ zap.String("current-db-size", humanize.Bytes(uint64(size1))),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse1),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse1))),
+ )
+ }
+ // gofail: var defragBeforeCopy struct{}
+ err = defragdb(b.db, tmpdb, defragLimit)
+ if err != nil {
+ tmpdb.Close()
+ if rmErr := os.RemoveAll(tmpdb.Path()); rmErr != nil {
+ if b.lg != nil {
+ b.lg.Error("failed to remove db.tmp after defragmentation completed", zap.Error(rmErr))
+ } else {
+ plog.Fatalf("failed to remove db.tmp after defragmentation completed: %v", rmErr)
+ }
+ }
+ return err
+ }
+
+ err = b.db.Close()
+ if err != nil {
+ if b.lg != nil {
+ b.lg.Fatal("failed to close database", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot close database (%s)", err)
+ }
+ }
+ err = tmpdb.Close()
+ if err != nil {
+ if b.lg != nil {
+ b.lg.Fatal("failed to close tmp database", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot close database (%s)", err)
+ }
+ }
+ // gofail: var defragBeforeRename struct{}
+ err = os.Rename(tdbp, dbp)
+ if err != nil {
+ if b.lg != nil {
+ b.lg.Fatal("failed to rename tmp database", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot rename database (%s)", err)
+ }
+ }
+
+ b.db, err = bolt.Open(dbp, 0600, boltOpenOptions)
+ if err != nil {
+ if b.lg != nil {
+ b.lg.Fatal("failed to open database", zap.String("path", dbp), zap.Error(err))
+ } else {
+ plog.Panicf("cannot open database at %s (%v)", dbp, err)
+ }
+ }
+ b.batchTx.tx = b.unsafeBegin(true)
+
+ b.readTx.reset()
+ b.readTx.tx = b.unsafeBegin(false)
+
+ size := b.readTx.tx.Size()
+ db := b.readTx.tx.DB()
+ atomic.StoreInt64(&b.size, size)
+ atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
+
+ took := time.Since(now)
+ defragSec.Observe(took.Seconds())
+
+ size2, sizeInUse2 := b.Size(), b.SizeInUse()
+ if b.lg != nil {
+ b.lg.Info(
+ "defragmented",
+ zap.String("path", dbp),
+ zap.Int64("current-db-size-bytes-diff", size2-size1),
+ zap.Int64("current-db-size-bytes", size2),
+ zap.String("current-db-size", humanize.Bytes(uint64(size2))),
+ zap.Int64("current-db-size-in-use-bytes-diff", sizeInUse2-sizeInUse1),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse2),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse2))),
+ zap.Duration("took", took),
+ )
+ }
+ return nil
+}
+
+func defragdb(odb, tmpdb *bolt.DB, limit int) error {
+ // open a tx on tmpdb for writes
+ tmptx, err := tmpdb.Begin(true)
+ if err != nil {
+ return err
+ }
+
+ // open a tx on old db for read
+ tx, err := odb.Begin(false)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ c := tx.Cursor()
+
+ count := 0
+ for next, _ := c.First(); next != nil; next, _ = c.Next() {
+ b := tx.Bucket(next)
+ if b == nil {
+ return fmt.Errorf("backend: cannot defrag bucket %s", string(next))
+ }
+
+ tmpb, berr := tmptx.CreateBucketIfNotExists(next)
+ if berr != nil {
+ return berr
+ }
+		tmpb.FillPercent = 0.9 // optimize for the sequential writes in ForEach below
+
+ b.ForEach(func(k, v []byte) error {
+ count++
+ if count > limit {
+ err = tmptx.Commit()
+ if err != nil {
+ return err
+ }
+ tmptx, err = tmpdb.Begin(true)
+ if err != nil {
+ return err
+ }
+ tmpb = tmptx.Bucket(next)
+				tmpb.FillPercent = 0.9 // optimize for the sequential writes in ForEach below
+
+ count = 0
+ }
+ return tmpb.Put(k, v)
+ })
+ }
+
+ return tmptx.Commit()
+}
+
+func (b *backend) begin(write bool) *bolt.Tx {
+ b.mu.RLock()
+ tx := b.unsafeBegin(write)
+ b.mu.RUnlock()
+
+ size := tx.Size()
+ db := tx.DB()
+ stats := db.Stats()
+ atomic.StoreInt64(&b.size, size)
+ atomic.StoreInt64(&b.sizeInUse, size-(int64(stats.FreePageN)*int64(db.Info().PageSize)))
+ atomic.StoreInt64(&b.openReadTxN, int64(stats.OpenTxN))
+
+ return tx
+}
+
+func (b *backend) unsafeBegin(write bool) *bolt.Tx {
+ tx, err := b.db.Begin(write)
+ if err != nil {
+ if b.lg != nil {
+ b.lg.Fatal("failed to begin tx", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot begin tx (%s)", err)
+ }
+ }
+ return tx
+}
+
+func (b *backend) OpenReadTxN() int64 {
+ return atomic.LoadInt64(&b.openReadTxN)
+}
+
+// NewTmpBackend creates a backend implementation for testing.
+func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) {
+ dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test")
+ if err != nil {
+ panic(err)
+ }
+ tmpPath := filepath.Join(dir, "database")
+ bcfg := DefaultBackendConfig()
+ bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = tmpPath, batchInterval, batchLimit
+ return newBackend(bcfg), tmpPath
+}
+
+func NewDefaultTmpBackend() (*backend, string) {
+ return NewTmpBackend(defaultBatchInterval, defaultBatchLimit)
+}
+
+type snapshot struct {
+ *bolt.Tx
+ stopc chan struct{}
+ donec chan struct{}
+}
+
+func (s *snapshot) Close() error {
+ close(s.stopc)
+ <-s.donec
+ return s.Tx.Rollback()
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/batch_tx.go b/vendor/go.etcd.io/etcd/mvcc/backend/batch_tx.go
new file mode 100644
index 000000000000..d5c8a88c353e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/batch_tx.go
@@ -0,0 +1,339 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "bytes"
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ bolt "go.etcd.io/bbolt"
+ "go.uber.org/zap"
+)
+
+type BatchTx interface {
+ ReadTx
+ UnsafeCreateBucket(name []byte)
+ UnsafePut(bucketName []byte, key []byte, value []byte)
+ UnsafeSeqPut(bucketName []byte, key []byte, value []byte)
+ UnsafeDelete(bucketName []byte, key []byte)
+ // Commit commits a previous tx and begins a new writable one.
+ Commit()
+ // CommitAndStop commits the previous tx and does not create a new one.
+ CommitAndStop()
+}
+
+type batchTx struct {
+ sync.Mutex
+ tx *bolt.Tx
+ backend *backend
+
+ pending int
+}
+
+func (t *batchTx) Lock() {
+ t.Mutex.Lock()
+}
+
+func (t *batchTx) Unlock() {
+ if t.pending >= t.backend.batchLimit {
+ t.commit(false)
+ }
+ t.Mutex.Unlock()
+}
+
+// The BatchTx interface embeds the ReadTx interface, but RLock() and RUnlock() do
+// not have appropriate semantics for a BatchTx, so they should not be called.
+// TODO: might want to decouple ReadTx and BatchTx
+
+func (t *batchTx) RLock() {
+ panic("unexpected RLock")
+}
+
+func (t *batchTx) RUnlock() {
+ panic("unexpected RUnlock")
+}
+
+func (t *batchTx) UnsafeCreateBucket(name []byte) {
+ _, err := t.tx.CreateBucket(name)
+ if err != nil && err != bolt.ErrBucketExists {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal(
+ "failed to create a bucket",
+ zap.String("bucket-name", string(name)),
+ zap.Error(err),
+ )
+ } else {
+ plog.Fatalf("cannot create bucket %s (%v)", name, err)
+ }
+ }
+ t.pending++
+}
+
+// UnsafePut must be called holding the lock on the tx.
+func (t *batchTx) UnsafePut(bucketName []byte, key []byte, value []byte) {
+ t.unsafePut(bucketName, key, value, false)
+}
+
+// UnsafeSeqPut must be called holding the lock on the tx.
+func (t *batchTx) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
+ t.unsafePut(bucketName, key, value, true)
+}
+
+func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq bool) {
+ bucket := t.tx.Bucket(bucketName)
+ if bucket == nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal(
+ "failed to find a bucket",
+ zap.String("bucket-name", string(bucketName)),
+ )
+ } else {
+ plog.Fatalf("bucket %s does not exist", bucketName)
+ }
+ }
+ if seq {
+		// it is useful to increase the fill percent when the workload is mostly append-only.
+		// this can delay page splits and reduce space usage.
+ bucket.FillPercent = 0.9
+ }
+ if err := bucket.Put(key, value); err != nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal(
+ "failed to write to a bucket",
+ zap.String("bucket-name", string(bucketName)),
+ zap.Error(err),
+ )
+ } else {
+ plog.Fatalf("cannot put key into bucket (%v)", err)
+ }
+ }
+ t.pending++
+}
+
+// UnsafeRange must be called holding the lock on the tx.
+func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ bucket := t.tx.Bucket(bucketName)
+ if bucket == nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal(
+ "failed to find a bucket",
+ zap.String("bucket-name", string(bucketName)),
+ )
+ } else {
+ plog.Fatalf("bucket %s does not exist", bucketName)
+ }
+ }
+ return unsafeRange(bucket.Cursor(), key, endKey, limit)
+}
+
+func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) {
+ if limit <= 0 {
+ limit = math.MaxInt64
+ }
+ var isMatch func(b []byte) bool
+ if len(endKey) > 0 {
+ isMatch = func(b []byte) bool { return bytes.Compare(b, endKey) < 0 }
+ } else {
+ isMatch = func(b []byte) bool { return bytes.Equal(b, key) }
+ limit = 1
+ }
+
+ for ck, cv := c.Seek(key); ck != nil && isMatch(ck); ck, cv = c.Next() {
+ vs = append(vs, cv)
+ keys = append(keys, ck)
+ if limit == int64(len(keys)) {
+ break
+ }
+ }
+ return keys, vs
+}
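+
+// Illustrative semantics of unsafeRange: a nil endKey degenerates to a point
+// lookup (limit is forced to 1), while a non-nil endKey returns keys in
+// [key, endKey), up to limit. c is a bolt cursor on the target bucket.
+//
+//	ks, vs := unsafeRange(c, []byte("a"), nil, 0)         // at most the single key "a"
+//	ks, vs = unsafeRange(c, []byte("a"), []byte("c"), 10) // "a" <= k < "c", up to 10 pairs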
+
+// UnsafeDelete must be called holding the lock on the tx.
+func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) {
+ bucket := t.tx.Bucket(bucketName)
+ if bucket == nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal(
+ "failed to find a bucket",
+ zap.String("bucket-name", string(bucketName)),
+ )
+ } else {
+ plog.Fatalf("bucket %s does not exist", bucketName)
+ }
+ }
+ err := bucket.Delete(key)
+ if err != nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal(
+ "failed to delete a key",
+ zap.String("bucket-name", string(bucketName)),
+ zap.Error(err),
+ )
+ } else {
+ plog.Fatalf("cannot delete key from bucket (%v)", err)
+ }
+ }
+ t.pending++
+}
+
+// UnsafeForEach must be called holding the lock on the tx.
+func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
+ return unsafeForEach(t.tx, bucketName, visitor)
+}
+
+func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error {
+ if b := tx.Bucket(bucket); b != nil {
+ return b.ForEach(visitor)
+ }
+ return nil
+}
+
+// Commit commits a previous tx and begins a new writable one.
+func (t *batchTx) Commit() {
+ t.Lock()
+ t.commit(false)
+ t.Unlock()
+}
+
+// CommitAndStop commits the previous tx and does not create a new one.
+func (t *batchTx) CommitAndStop() {
+ t.Lock()
+ t.commit(true)
+ t.Unlock()
+}
+
+func (t *batchTx) safePending() int {
+ t.Mutex.Lock()
+ defer t.Mutex.Unlock()
+ return t.pending
+}
+
+func (t *batchTx) commit(stop bool) {
+ // commit the last tx
+ if t.tx != nil {
+ if t.pending == 0 && !stop {
+ return
+ }
+
+ start := time.Now()
+
+ // gofail: var beforeCommit struct{}
+ err := t.tx.Commit()
+ // gofail: var afterCommit struct{}
+
+ rebalanceSec.Observe(t.tx.Stats().RebalanceTime.Seconds())
+ spillSec.Observe(t.tx.Stats().SpillTime.Seconds())
+ writeSec.Observe(t.tx.Stats().WriteTime.Seconds())
+ commitSec.Observe(time.Since(start).Seconds())
+ atomic.AddInt64(&t.backend.commits, 1)
+
+ t.pending = 0
+ if err != nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal("failed to commit tx", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot commit tx (%s)", err)
+ }
+ }
+ }
+ if !stop {
+ t.tx = t.backend.begin(true)
+ }
+}
+
+type batchTxBuffered struct {
+ batchTx
+ buf txWriteBuffer
+}
+
+func newBatchTxBuffered(backend *backend) *batchTxBuffered {
+ tx := &batchTxBuffered{
+ batchTx: batchTx{backend: backend},
+ buf: txWriteBuffer{
+ txBuffer: txBuffer{make(map[string]*bucketBuffer)},
+ seq: true,
+ },
+ }
+ tx.Commit()
+ return tx
+}
+
+func (t *batchTxBuffered) Unlock() {
+ if t.pending != 0 {
+ t.backend.readTx.Lock() // blocks txReadBuffer for writing.
+ t.buf.writeback(&t.backend.readTx.buf)
+ t.backend.readTx.Unlock()
+ if t.pending >= t.backend.batchLimit {
+ t.commit(false)
+ }
+ }
+ t.batchTx.Unlock()
+}
+
+func (t *batchTxBuffered) Commit() {
+ t.Lock()
+ t.commit(false)
+ t.Unlock()
+}
+
+func (t *batchTxBuffered) CommitAndStop() {
+ t.Lock()
+ t.commit(true)
+ t.Unlock()
+}
+
+func (t *batchTxBuffered) commit(stop bool) {
+ // all read txs must be closed to acquire boltdb commit rwlock
+ t.backend.readTx.Lock()
+ t.unsafeCommit(stop)
+ t.backend.readTx.Unlock()
+}
+
+func (t *batchTxBuffered) unsafeCommit(stop bool) {
+ if t.backend.readTx.tx != nil {
+		// wait for all store read transactions using the current boltdb tx to finish,
+		// then close the boltdb tx
+ go func(tx *bolt.Tx, wg *sync.WaitGroup) {
+ wg.Wait()
+ if err := tx.Rollback(); err != nil {
+ if t.backend.lg != nil {
+ t.backend.lg.Fatal("failed to rollback tx", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot rollback tx (%s)", err)
+ }
+ }
+ }(t.backend.readTx.tx, t.backend.readTx.txWg)
+ t.backend.readTx.reset()
+ }
+
+ t.batchTx.commit(stop)
+
+ if !stop {
+ t.backend.readTx.tx = t.backend.begin(false)
+ }
+}
+
+func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) {
+ t.batchTx.UnsafePut(bucketName, key, value)
+ t.buf.put(bucketName, key, value)
+}
+
+func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
+ t.batchTx.UnsafeSeqPut(bucketName, key, value)
+ t.buf.putSeq(bucketName, key, value)
+}
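+
+// A hedged sketch of typical buffered batch-tx usage: writes become visible to
+// readers via the write-back buffer on Unlock, and are durably committed when
+// pending operations reach the batch limit, on the batch interval, or on
+// ForceCommit. The bucket and key names are hypothetical.
+//
+//	tx := b.BatchTx()
+//	tx.Lock()
+//	tx.UnsafeCreateBucket([]byte("example"))
+//	tx.UnsafePut([]byte("example"), []byte("k"), []byte("v"))
+//	tx.Unlock()     // writes back to the read buffer; may trigger a commit
+//	b.ForceCommit() // or force the boltdb commit immediately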
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/config_default.go b/vendor/go.etcd.io/etcd/mvcc/backend/config_default.go
new file mode 100644
index 000000000000..f15f030f8e5e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/config_default.go
@@ -0,0 +1,23 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux,!windows
+
+package backend
+
+import bolt "go.etcd.io/bbolt"
+
+var boltOpenOptions *bolt.Options
+
+func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) }
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/config_linux.go b/vendor/go.etcd.io/etcd/mvcc/backend/config_linux.go
new file mode 100644
index 000000000000..f712671af440
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/config_linux.go
@@ -0,0 +1,34 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "syscall"
+
+ bolt "go.etcd.io/bbolt"
+)
+
+// syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead
+// which can speed up entire-database read with boltdb. We want to
+// enable MAP_POPULATE for faster key-value store recovery in storage
+// package. If your kernel version is lower than 2.6.23
+// (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might
+// silently ignore this flag. Please update your kernel to prevent this.
+var boltOpenOptions = &bolt.Options{
+ MmapFlags: syscall.MAP_POPULATE,
+ NoFreelistSync: true,
+}
+
+func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) }
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/config_windows.go b/vendor/go.etcd.io/etcd/mvcc/backend/config_windows.go
new file mode 100644
index 000000000000..c6500592c675
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/config_windows.go
@@ -0,0 +1,26 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package backend
+
+import bolt "go.etcd.io/bbolt"
+
+var boltOpenOptions *bolt.Options = nil
+
+// Setting mmap size != 0 on Windows allocates the entire mmap size for the
+// file instead of growing it, so force 0.
+
+func (bcfg *BackendConfig) mmapSize() int { return 0 }
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/doc.go b/vendor/go.etcd.io/etcd/mvcc/backend/doc.go
new file mode 100644
index 000000000000..9cc42fa793cb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package backend defines a standard interface for etcd's backend MVCC storage.
+package backend
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/metrics.go b/vendor/go.etcd.io/etcd/mvcc/backend/metrics.go
new file mode 100644
index 000000000000..d9641af7ae23
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/metrics.go
@@ -0,0 +1,95 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ commitSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "backend_commit_duration_seconds",
+ Help: "The latency distributions of commit called by backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ rebalanceSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "disk",
+ Name: "backend_commit_rebalance_duration_seconds",
+ Help: "The latency distributions of commit.rebalance called by bboltdb backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ spillSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "disk",
+ Name: "backend_commit_spill_duration_seconds",
+ Help: "The latency distributions of commit.spill called by bboltdb backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ writeSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "disk",
+ Name: "backend_commit_write_duration_seconds",
+ Help: "The latency distributions of commit.write called by bboltdb backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ defragSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "backend_defrag_duration_seconds",
+ Help: "The latency distribution of backend defragmentation.",
+
+ // 100 MB usually takes 1 sec, so start with 10 MB of 100 ms
+ // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+ // highest bucket start of 0.1 sec * 2^12 == 409.6 sec
+ Buckets: prometheus.ExponentialBuckets(.1, 2, 13),
+ })
+
+ snapshotTransferSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "backend_snapshot_duration_seconds",
+ Help: "The latency distribution of backend snapshots.",
+
+ // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+ // highest bucket start of 0.01 sec * 2^16 == 655.36 sec
+ Buckets: prometheus.ExponentialBuckets(.01, 2, 17),
+ })
+)
+
+func init() {
+ prometheus.MustRegister(commitSec)
+ prometheus.MustRegister(rebalanceSec)
+ prometheus.MustRegister(spillSec)
+ prometheus.MustRegister(writeSec)
+ prometheus.MustRegister(defragSec)
+ prometheus.MustRegister(snapshotTransferSec)
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/read_tx.go b/vendor/go.etcd.io/etcd/mvcc/backend/read_tx.go
new file mode 100644
index 000000000000..91fe72ec5589
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/read_tx.go
@@ -0,0 +1,210 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "bytes"
+ "math"
+ "sync"
+
+ bolt "go.etcd.io/bbolt"
+)
+
+// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys;
+// overwrites on a bucket should only fetch with limit=1, but safeRangeBucket
+// is known to never overwrite any key so range is safe.
+var safeRangeBucket = []byte("key")
+
+type ReadTx interface {
+ Lock()
+ Unlock()
+ RLock()
+ RUnlock()
+
+ UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte)
+ UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error
+}
+
+type readTx struct {
+ // mu protects accesses to the txReadBuffer
+ mu sync.RWMutex
+ buf txReadBuffer
+
+ // TODO: group and encapsulate {txMu, tx, buckets, txWg}, as they share the same lifecycle.
+ // txMu protects accesses to buckets and tx on Range requests.
+ txMu sync.RWMutex
+ tx *bolt.Tx
+ buckets map[string]*bolt.Bucket
+ // txWg protects tx from being rolled back at the end of a batch interval until all reads using this tx are done.
+ txWg *sync.WaitGroup
+}
+
+func (rt *readTx) Lock() { rt.mu.Lock() }
+func (rt *readTx) Unlock() { rt.mu.Unlock() }
+func (rt *readTx) RLock() { rt.mu.RLock() }
+func (rt *readTx) RUnlock() { rt.mu.RUnlock() }
+
+func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ if endKey == nil {
+ // forbid duplicates for single keys
+ limit = 1
+ }
+ if limit <= 0 {
+ limit = math.MaxInt64
+ }
+ if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) {
+ panic("do not use unsafeRange on non-keys bucket")
+ }
+ keys, vals := rt.buf.Range(bucketName, key, endKey, limit)
+ if int64(len(keys)) == limit {
+ return keys, vals
+ }
+
+ // find/cache bucket
+ bn := string(bucketName)
+ rt.txMu.RLock()
+ bucket, ok := rt.buckets[bn]
+ rt.txMu.RUnlock()
+ if !ok {
+ rt.txMu.Lock()
+ bucket = rt.tx.Bucket(bucketName)
+ rt.buckets[bn] = bucket
+ rt.txMu.Unlock()
+ }
+
+	// ignore a missing bucket since it may have been created in this batch
+ if bucket == nil {
+ return keys, vals
+ }
+ rt.txMu.Lock()
+ c := bucket.Cursor()
+ rt.txMu.Unlock()
+
+ k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys)))
+ return append(k2, keys...), append(v2, vals...)
+}
+
+func (rt *readTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
+ dups := make(map[string]struct{})
+ getDups := func(k, v []byte) error {
+ dups[string(k)] = struct{}{}
+ return nil
+ }
+ visitNoDup := func(k, v []byte) error {
+ if _, ok := dups[string(k)]; ok {
+ return nil
+ }
+ return visitor(k, v)
+ }
+ if err := rt.buf.ForEach(bucketName, getDups); err != nil {
+ return err
+ }
+ rt.txMu.Lock()
+ err := unsafeForEach(rt.tx, bucketName, visitNoDup)
+ rt.txMu.Unlock()
+ if err != nil {
+ return err
+ }
+ return rt.buf.ForEach(bucketName, visitor)
+}
+
+func (rt *readTx) reset() {
+ rt.buf.reset()
+ rt.buckets = make(map[string]*bolt.Bucket)
+ rt.tx = nil
+ rt.txWg = new(sync.WaitGroup)
+}
+
+// TODO: create a base type for readTx and concurrentReadTx to avoid duplicated function implementation?
+type concurrentReadTx struct {
+ buf txReadBuffer
+ txMu *sync.RWMutex
+ tx *bolt.Tx
+ buckets map[string]*bolt.Bucket
+ txWg *sync.WaitGroup
+}
+
+func (rt *concurrentReadTx) Lock() {}
+func (rt *concurrentReadTx) Unlock() {}
+
+// RLock is a no-op. concurrentReadTx does not need to be locked after it is created.
+func (rt *concurrentReadTx) RLock() {}
+
+// RUnlock signals the end of concurrentReadTx.
+func (rt *concurrentReadTx) RUnlock() { rt.txWg.Done() }
+
+func (rt *concurrentReadTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
+ dups := make(map[string]struct{})
+ getDups := func(k, v []byte) error {
+ dups[string(k)] = struct{}{}
+ return nil
+ }
+ visitNoDup := func(k, v []byte) error {
+ if _, ok := dups[string(k)]; ok {
+ return nil
+ }
+ return visitor(k, v)
+ }
+ if err := rt.buf.ForEach(bucketName, getDups); err != nil {
+ return err
+ }
+ rt.txMu.Lock()
+ err := unsafeForEach(rt.tx, bucketName, visitNoDup)
+ rt.txMu.Unlock()
+ if err != nil {
+ return err
+ }
+ return rt.buf.ForEach(bucketName, visitor)
+}
+
+func (rt *concurrentReadTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ if endKey == nil {
+ // forbid duplicates for single keys
+ limit = 1
+ }
+ if limit <= 0 {
+ limit = math.MaxInt64
+ }
+ if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) {
+ panic("do not use unsafeRange on non-keys bucket")
+ }
+ keys, vals := rt.buf.Range(bucketName, key, endKey, limit)
+ if int64(len(keys)) == limit {
+ return keys, vals
+ }
+
+ // find/cache bucket
+ bn := string(bucketName)
+ rt.txMu.RLock()
+ bucket, ok := rt.buckets[bn]
+ rt.txMu.RUnlock()
+ if !ok {
+ rt.txMu.Lock()
+ bucket = rt.tx.Bucket(bucketName)
+ rt.buckets[bn] = bucket
+ rt.txMu.Unlock()
+ }
+
+	// ignore a missing bucket since it may have been created in this batch
+ if bucket == nil {
+ return keys, vals
+ }
+ rt.txMu.Lock()
+ c := bucket.Cursor()
+ rt.txMu.Unlock()
+
+ k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys)))
+ return append(k2, keys...), append(v2, vals...)
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/backend/tx_buffer.go b/vendor/go.etcd.io/etcd/mvcc/backend/tx_buffer.go
new file mode 100644
index 000000000000..d73463823cad
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/backend/tx_buffer.go
@@ -0,0 +1,203 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "bytes"
+ "sort"
+)
+
+// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer.
+type txBuffer struct {
+ buckets map[string]*bucketBuffer
+}
+
+func (txb *txBuffer) reset() {
+ for k, v := range txb.buckets {
+ if v.used == 0 {
+			// demote: drop buckets that went unused in the previous batch
+ delete(txb.buckets, k)
+ }
+ v.used = 0
+ }
+}
+
+// txWriteBuffer buffers writes of pending updates that have not yet committed.
+type txWriteBuffer struct {
+ txBuffer
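+	// seq tracks whether all puts so far were issued through putSeq (keys in
+	// ascending order); once a plain put clears it, writeback must sort each
+	// bucket buffer before merging.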
+ seq bool
+}
+
+func (txw *txWriteBuffer) put(bucket, k, v []byte) {
+ txw.seq = false
+ txw.putSeq(bucket, k, v)
+}
+
+func (txw *txWriteBuffer) putSeq(bucket, k, v []byte) {
+ b, ok := txw.buckets[string(bucket)]
+ if !ok {
+ b = newBucketBuffer()
+ txw.buckets[string(bucket)] = b
+ }
+ b.add(k, v)
+}
+
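+// writeback moves the buffered writes into the read buffer so that later
+// reads within the same batch observe the pending updates.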
+func (txw *txWriteBuffer) writeback(txr *txReadBuffer) {
+ for k, wb := range txw.buckets {
+ rb, ok := txr.buckets[k]
+ if !ok {
+ delete(txw.buckets, k)
+ txr.buckets[k] = wb
+ continue
+ }
+ if !txw.seq && wb.used > 1 {
+ // assume no duplicate keys
+ sort.Sort(wb)
+ }
+ rb.merge(wb)
+ }
+ txw.reset()
+}
+
+// txReadBuffer accesses buffered updates.
+type txReadBuffer struct{ txBuffer }
+
+func (txr *txReadBuffer) Range(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ if b := txr.buckets[string(bucketName)]; b != nil {
+ return b.Range(key, endKey, limit)
+ }
+ return nil, nil
+}
+
+func (txr *txReadBuffer) ForEach(bucketName []byte, visitor func(k, v []byte) error) error {
+ if b := txr.buckets[string(bucketName)]; b != nil {
+ return b.ForEach(visitor)
+ }
+ return nil
+}
+
+// unsafeCopy returns a copy of txReadBuffer; the caller should acquire backend.readTx.RLock() first
+func (txr *txReadBuffer) unsafeCopy() txReadBuffer {
+ txrCopy := txReadBuffer{
+ txBuffer: txBuffer{
+ buckets: make(map[string]*bucketBuffer, len(txr.txBuffer.buckets)),
+ },
+ }
+ for bucketName, bucket := range txr.txBuffer.buckets {
+ txrCopy.txBuffer.buckets[bucketName] = bucket.Copy()
+ }
+ return txrCopy
+}
+
+type kv struct {
+ key []byte
+ val []byte
+}
+
+// bucketBuffer buffers key-value pairs that are pending commit.
+type bucketBuffer struct {
+ buf []kv
+	// used tracks the number of elements in use so buf can be reused without reallocation.
+ used int
+}
+
+func newBucketBuffer() *bucketBuffer {
+ return &bucketBuffer{buf: make([]kv, 512), used: 0}
+}
+
+func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
+ f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 }
+ idx := sort.Search(bb.used, f)
+ if idx < 0 {
+ return nil, nil
+ }
+ if len(endKey) == 0 {
+ if bytes.Equal(key, bb.buf[idx].key) {
+ keys = append(keys, bb.buf[idx].key)
+ vals = append(vals, bb.buf[idx].val)
+ }
+ return keys, vals
+ }
+ if bytes.Compare(endKey, bb.buf[idx].key) <= 0 {
+ return nil, nil
+ }
+ for i := idx; i < bb.used && int64(len(keys)) < limit; i++ {
+ if bytes.Compare(endKey, bb.buf[i].key) <= 0 {
+ break
+ }
+ keys = append(keys, bb.buf[i].key)
+ vals = append(vals, bb.buf[i].val)
+ }
+ return keys, vals
+}
+
+func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error {
+ for i := 0; i < bb.used; i++ {
+ if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (bb *bucketBuffer) add(k, v []byte) {
+ bb.buf[bb.used].key, bb.buf[bb.used].val = k, v
+ bb.used++
+ if bb.used == len(bb.buf) {
+ buf := make([]kv, (3*len(bb.buf))/2)
+ copy(buf, bb.buf)
+ bb.buf = buf
+ }
+}
+
+// merge merges data from bbsrc into bb.
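+// For example, merging {a=1, c=3} with a bbsrc of {b=2, a=4} appends to get
+// {a=1, c=3, b=2, a=4}, stably sorts to {a=1, a=4, b=2, c=3}, and then keeps
+// only the newest update per key: {a=4, b=2, c=3}.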
+func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) {
+ for i := 0; i < bbsrc.used; i++ {
+ bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val)
+ }
+ if bb.used == bbsrc.used {
+ return
+ }
+ if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 {
+ return
+ }
+
+ sort.Stable(bb)
+
+ // remove duplicates, using only newest update
+ widx := 0
+ for ridx := 1; ridx < bb.used; ridx++ {
+ if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) {
+ widx++
+ }
+ bb.buf[widx] = bb.buf[ridx]
+ }
+ bb.used = widx + 1
+}
+
+func (bb *bucketBuffer) Len() int { return bb.used }
+func (bb *bucketBuffer) Less(i, j int) bool {
+ return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0
+}
+func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] }
+
+func (bb *bucketBuffer) Copy() *bucketBuffer {
+ bbCopy := bucketBuffer{
+ buf: make([]kv, len(bb.buf)),
+ used: bb.used,
+ }
+ copy(bbCopy.buf, bb.buf)
+ return &bbCopy
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/doc.go b/vendor/go.etcd.io/etcd/mvcc/doc.go
new file mode 100644
index 000000000000..ad5be03086fb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package mvcc defines etcd's stable MVCC storage.
+package mvcc
diff --git a/vendor/go.etcd.io/etcd/mvcc/index.go b/vendor/go.etcd.io/etcd/mvcc/index.go
new file mode 100644
index 000000000000..f8cc6df88cfb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/index.go
@@ -0,0 +1,258 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "sort"
+ "sync"
+
+ "github.com/google/btree"
+ "go.uber.org/zap"
+)
+
+type index interface {
+ Get(key []byte, atRev int64) (rev, created revision, ver int64, err error)
+ Range(key, end []byte, atRev int64) ([][]byte, []revision)
+ Revisions(key, end []byte, atRev int64) []revision
+ Put(key []byte, rev revision)
+ Tombstone(key []byte, rev revision) error
+ RangeSince(key, end []byte, rev int64) []revision
+ Compact(rev int64) map[revision]struct{}
+ Keep(rev int64) map[revision]struct{}
+ Equal(b index) bool
+
+ Insert(ki *keyIndex)
+ KeyIndex(ki *keyIndex) *keyIndex
+}
+
+type treeIndex struct {
+ sync.RWMutex
+ tree *btree.BTree
+ lg *zap.Logger
+}
+
+func newTreeIndex(lg *zap.Logger) index {
+ return &treeIndex{
+ tree: btree.New(32),
+ lg: lg,
+ }
+}
+
+func (ti *treeIndex) Put(key []byte, rev revision) {
+ keyi := &keyIndex{key: key}
+
+ ti.Lock()
+ defer ti.Unlock()
+ item := ti.tree.Get(keyi)
+ if item == nil {
+ keyi.put(ti.lg, rev.main, rev.sub)
+ ti.tree.ReplaceOrInsert(keyi)
+ return
+ }
+ okeyi := item.(*keyIndex)
+ okeyi.put(ti.lg, rev.main, rev.sub)
+}
+
+func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) {
+ keyi := &keyIndex{key: key}
+ ti.RLock()
+ defer ti.RUnlock()
+ if keyi = ti.keyIndex(keyi); keyi == nil {
+ return revision{}, revision{}, 0, ErrRevisionNotFound
+ }
+ return keyi.get(ti.lg, atRev)
+}
+
+func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex {
+ ti.RLock()
+ defer ti.RUnlock()
+ return ti.keyIndex(keyi)
+}
+
+func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex {
+ if item := ti.tree.Get(keyi); item != nil {
+ return item.(*keyIndex)
+ }
+ return nil
+}
+
+func (ti *treeIndex) visit(key, end []byte, f func(ki *keyIndex)) {
+ keyi, endi := &keyIndex{key: key}, &keyIndex{key: end}
+
+ ti.RLock()
+ defer ti.RUnlock()
+
+ ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
+ if len(endi.key) > 0 && !item.Less(endi) {
+ return false
+ }
+ f(item.(*keyIndex))
+ return true
+ })
+}
+
+func (ti *treeIndex) Revisions(key, end []byte, atRev int64) (revs []revision) {
+ if end == nil {
+ rev, _, _, err := ti.Get(key, atRev)
+ if err != nil {
+ return nil
+ }
+ return []revision{rev}
+ }
+ ti.visit(key, end, func(ki *keyIndex) {
+ if rev, _, _, err := ki.get(ti.lg, atRev); err == nil {
+ revs = append(revs, rev)
+ }
+ })
+ return revs
+}
+
+func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) {
+ if end == nil {
+ rev, _, _, err := ti.Get(key, atRev)
+ if err != nil {
+ return nil, nil
+ }
+ return [][]byte{key}, []revision{rev}
+ }
+ ti.visit(key, end, func(ki *keyIndex) {
+ if rev, _, _, err := ki.get(ti.lg, atRev); err == nil {
+ revs = append(revs, rev)
+ keys = append(keys, ki.key)
+ }
+ })
+ return keys, revs
+}
+
+func (ti *treeIndex) Tombstone(key []byte, rev revision) error {
+ keyi := &keyIndex{key: key}
+
+ ti.Lock()
+ defer ti.Unlock()
+ item := ti.tree.Get(keyi)
+ if item == nil {
+ return ErrRevisionNotFound
+ }
+
+ ki := item.(*keyIndex)
+ return ki.tombstone(ti.lg, rev.main, rev.sub)
+}
+
+// RangeSince returns all revisions from key (inclusive) to end (exclusive)
+// at or after the given rev. The returned slice is sorted by revision.
+func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision {
+ keyi := &keyIndex{key: key}
+
+ ti.RLock()
+ defer ti.RUnlock()
+
+ if end == nil {
+ item := ti.tree.Get(keyi)
+ if item == nil {
+ return nil
+ }
+ keyi = item.(*keyIndex)
+ return keyi.since(ti.lg, rev)
+ }
+
+ endi := &keyIndex{key: end}
+ var revs []revision
+ ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
+ if len(endi.key) > 0 && !item.Less(endi) {
+ return false
+ }
+ curKeyi := item.(*keyIndex)
+ revs = append(revs, curKeyi.since(ti.lg, rev)...)
+ return true
+ })
+ sort.Sort(revisions(revs))
+
+ return revs
+}
+
+func (ti *treeIndex) Compact(rev int64) map[revision]struct{} {
+ available := make(map[revision]struct{})
+ if ti.lg != nil {
+ ti.lg.Info("compact tree index", zap.Int64("revision", rev))
+ } else {
+ plog.Printf("store.index: compact %d", rev)
+ }
+ ti.Lock()
+ clone := ti.tree.Clone()
+ ti.Unlock()
+
+ clone.Ascend(func(item btree.Item) bool {
+ keyi := item.(*keyIndex)
+		// Lock is needed here to prevent modification to the keyIndex while
+		// compaction is in progress, or a revision from being added to an
+		// emptied keyIndex before it is deleted
+ ti.Lock()
+ keyi.compact(ti.lg, rev, available)
+ if keyi.isEmpty() {
+ item := ti.tree.Delete(keyi)
+ if item == nil {
+ if ti.lg != nil {
+ ti.lg.Panic("failed to delete during compaction")
+ } else {
+ plog.Panic("store.index: unexpected delete failure during compaction")
+ }
+ }
+ }
+ ti.Unlock()
+ return true
+ })
+ return available
+}
+
+// Keep finds all revisions to be kept for a Compaction at the given rev.
+func (ti *treeIndex) Keep(rev int64) map[revision]struct{} {
+ available := make(map[revision]struct{})
+ ti.RLock()
+ defer ti.RUnlock()
+ ti.tree.Ascend(func(i btree.Item) bool {
+ keyi := i.(*keyIndex)
+ keyi.keep(rev, available)
+ return true
+ })
+ return available
+}
+
+func (ti *treeIndex) Equal(bi index) bool {
+ b := bi.(*treeIndex)
+
+ if ti.tree.Len() != b.tree.Len() {
+ return false
+ }
+
+ equal := true
+
+ ti.tree.Ascend(func(item btree.Item) bool {
+ aki := item.(*keyIndex)
+ bki := b.tree.Get(item).(*keyIndex)
+ if !aki.equal(bki) {
+ equal = false
+ return false
+ }
+ return true
+ })
+
+ return equal
+}
+
+func (ti *treeIndex) Insert(ki *keyIndex) {
+ ti.Lock()
+ defer ti.Unlock()
+ ti.tree.ReplaceOrInsert(ki)
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/key_index.go b/vendor/go.etcd.io/etcd/mvcc/key_index.go
new file mode 100644
index 000000000000..cf77cb438b35
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/key_index.go
@@ -0,0 +1,402 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+
+ "github.com/google/btree"
+ "go.uber.org/zap"
+)
+
+var (
+ ErrRevisionNotFound = errors.New("mvcc: revision not found")
+)
+
+// keyIndex stores the revisions of a key in the backend.
+// Each keyIndex has at least one key generation.
+// Each generation might have several key versions.
+// Tombstone on a key appends a tombstone version at the end
+// of the current generation and creates a new empty generation.
+// Each version of a key has an index pointing to the backend.
+//
+// For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo"
+// generate a keyIndex:
+// key: "foo"
+// rev: 5
+// generations:
+// {empty}
+// {4.0, 5.0(t)}
+// {1.0, 2.0, 3.0(t)}
+//
+// Compacting a keyIndex removes the versions with main revision smaller
+// than or equal to rev, except the largest one. If a generation becomes
+// empty during compaction, it will be removed. If all the generations get
+// removed, the keyIndex should be removed.
+//
+// For example:
+// compact(2) on the previous example
+// generations:
+// {empty}
+// {4.0, 5.0(t)}
+// {2.0, 3.0(t)}
+//
+// compact(4)
+// generations:
+// {empty}
+// {4.0, 5.0(t)}
+//
+// compact(5):
+// generations:
+// {empty} -> key SHOULD be removed.
+//
+// compact(6):
+// generations:
+// {empty} -> key SHOULD be removed.
+type keyIndex struct {
+ key []byte
+ modified revision // the main rev of the last modification
+ generations []generation
+}
+
+// put puts a revision to the keyIndex.
+func (ki *keyIndex) put(lg *zap.Logger, main int64, sub int64) {
+ rev := revision{main: main, sub: sub}
+
+ if !rev.GreaterThan(ki.modified) {
+ if lg != nil {
+ lg.Panic(
+ "'put' with an unexpected smaller revision",
+ zap.Int64("given-revision-main", rev.main),
+ zap.Int64("given-revision-sub", rev.sub),
+ zap.Int64("modified-revision-main", ki.modified.main),
+ zap.Int64("modified-revision-sub", ki.modified.sub),
+ )
+ } else {
+ plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified)
+ }
+ }
+ if len(ki.generations) == 0 {
+ ki.generations = append(ki.generations, generation{})
+ }
+ g := &ki.generations[len(ki.generations)-1]
+ if len(g.revs) == 0 { // create a new key
+ keysGauge.Inc()
+ g.created = rev
+ }
+ g.revs = append(g.revs, rev)
+ g.ver++
+ ki.modified = rev
+}
+
+func (ki *keyIndex) restore(lg *zap.Logger, created, modified revision, ver int64) {
+ if len(ki.generations) != 0 {
+ if lg != nil {
+ lg.Panic(
+ "'restore' got an unexpected non-empty generations",
+ zap.Int("generations-size", len(ki.generations)),
+ )
+ } else {
+ plog.Panicf("store.keyindex: cannot restore non-empty keyIndex")
+ }
+ }
+
+ ki.modified = modified
+ g := generation{created: created, ver: ver, revs: []revision{modified}}
+ ki.generations = append(ki.generations, g)
+ keysGauge.Inc()
+}
+
+// tombstone puts a revision, pointing to a tombstone, to the keyIndex.
+// It also creates a new empty generation in the keyIndex.
+// It returns ErrRevisionNotFound when the current generation is already empty.
+func (ki *keyIndex) tombstone(lg *zap.Logger, main int64, sub int64) error {
+ if ki.isEmpty() {
+ if lg != nil {
+ lg.Panic(
+ "'tombstone' got an unexpected empty keyIndex",
+ zap.String("key", string(ki.key)),
+ )
+ } else {
+ plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key))
+ }
+ }
+ if ki.generations[len(ki.generations)-1].isEmpty() {
+ return ErrRevisionNotFound
+ }
+ ki.put(lg, main, sub)
+ ki.generations = append(ki.generations, generation{})
+ keysGauge.Dec()
+ return nil
+}
+
+// get gets the modified revision, created revision, and version of the key
+// that satisfies the given atRev, i.e. the latest revision whose main
+// revision is smaller than or equal to atRev.
+func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created revision, ver int64, err error) {
+ if ki.isEmpty() {
+ if lg != nil {
+ lg.Panic(
+ "'get' got an unexpected empty keyIndex",
+ zap.String("key", string(ki.key)),
+ )
+ } else {
+ plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key))
+ }
+ }
+ g := ki.findGeneration(atRev)
+ if g.isEmpty() {
+ return revision{}, revision{}, 0, ErrRevisionNotFound
+ }
+
+ n := g.walk(func(rev revision) bool { return rev.main > atRev })
+ if n != -1 {
+ return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil
+ }
+
+ return revision{}, revision{}, 0, ErrRevisionNotFound
+}
+
+// since returns revisions since the given rev. Only the revision with the
+// largest sub revision will be returned if multiple revisions have the same
+// main revision.
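+// For example, for revisions 1.0, 2.0, 2.1 and 3.0 of a key, since(2)
+// returns [2.1, 3.0]: revision 2.0 is superseded by 2.1, which shares its
+// main revision but has a larger sub revision.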
+func (ki *keyIndex) since(lg *zap.Logger, rev int64) []revision {
+ if ki.isEmpty() {
+ if lg != nil {
+ lg.Panic(
+ "'since' got an unexpected empty keyIndex",
+ zap.String("key", string(ki.key)),
+ )
+ } else {
+ plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key))
+ }
+ }
+ since := revision{rev, 0}
+ var gi int
+ // find the generations to start checking
+ for gi = len(ki.generations) - 1; gi > 0; gi-- {
+ g := ki.generations[gi]
+ if g.isEmpty() {
+ continue
+ }
+ if since.GreaterThan(g.created) {
+ break
+ }
+ }
+
+ var revs []revision
+ var last int64
+ for ; gi < len(ki.generations); gi++ {
+ for _, r := range ki.generations[gi].revs {
+ if since.GreaterThan(r) {
+ continue
+ }
+ if r.main == last {
+ // replace the revision with a new one that has higher sub value,
+ // because the original one should not be seen by external
+ revs[len(revs)-1] = r
+ continue
+ }
+ revs = append(revs, r)
+ last = r.main
+ }
+ }
+ return revs
+}
+
+// compact compacts a keyIndex by removing the versions with main revision
+// smaller than or equal to the given atRev, except the largest one (if the
+// largest one is a tombstone, it will not be kept).
+// If a generation becomes empty during compaction, it will be removed.
+func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[revision]struct{}) {
+ if ki.isEmpty() {
+ if lg != nil {
+ lg.Panic(
+ "'compact' got an unexpected empty keyIndex",
+ zap.String("key", string(ki.key)),
+ )
+ } else {
+ plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key))
+ }
+ }
+
+ genIdx, revIndex := ki.doCompact(atRev, available)
+
+ g := &ki.generations[genIdx]
+ if !g.isEmpty() {
+ // remove the previous contents.
+ if revIndex != -1 {
+ g.revs = g.revs[revIndex:]
+ }
+ // remove any tombstone
+ if len(g.revs) == 1 && genIdx != len(ki.generations)-1 {
+ delete(available, g.revs[0])
+ genIdx++
+ }
+ }
+
+ // remove the previous generations.
+ ki.generations = ki.generations[genIdx:]
+}
+
+// keep finds the revision to be kept if compact is called at given atRev.
+func (ki *keyIndex) keep(atRev int64, available map[revision]struct{}) {
+ if ki.isEmpty() {
+ return
+ }
+
+ genIdx, revIndex := ki.doCompact(atRev, available)
+ g := &ki.generations[genIdx]
+ if !g.isEmpty() {
+ // remove any tombstone
+ if revIndex == len(g.revs)-1 && genIdx != len(ki.generations)-1 {
+ delete(available, g.revs[revIndex])
+ }
+ }
+}
+
+func (ki *keyIndex) doCompact(atRev int64, available map[revision]struct{}) (genIdx int, revIndex int) {
+ // walk until reaching the first revision smaller or equal to "atRev",
+ // and add the revision to the available map
+ f := func(rev revision) bool {
+ if rev.main <= atRev {
+ available[rev] = struct{}{}
+ return false
+ }
+ return true
+ }
+
+ genIdx, g := 0, &ki.generations[0]
+	// find the first generation that includes atRev or was created after atRev
+ for genIdx < len(ki.generations)-1 {
+ if tomb := g.revs[len(g.revs)-1].main; tomb > atRev {
+ break
+ }
+ genIdx++
+ g = &ki.generations[genIdx]
+ }
+
+ revIndex = g.walk(f)
+
+ return genIdx, revIndex
+}
+
+func (ki *keyIndex) isEmpty() bool {
+ return len(ki.generations) == 1 && ki.generations[0].isEmpty()
+}
+
+// findGeneration finds the generation of the keyIndex that the given rev
+// belongs to. If the given rev falls in the gap between two generations,
+// which means that the key does not exist at the given rev, it returns nil.
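+// For example, with generations {1.0, 2.0, 3.0(t)} and {4.0, 5.0(t)} plus
+// the trailing empty generation, findGeneration(2) returns the first
+// generation, findGeneration(3) returns nil because the key was deleted at
+// rev 3, and findGeneration(4) returns the second generation.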
+func (ki *keyIndex) findGeneration(rev int64) *generation {
+ lastg := len(ki.generations) - 1
+ cg := lastg
+
+ for cg >= 0 {
+ if len(ki.generations[cg].revs) == 0 {
+ cg--
+ continue
+ }
+ g := ki.generations[cg]
+ if cg != lastg {
+ if tomb := g.revs[len(g.revs)-1].main; tomb <= rev {
+ return nil
+ }
+ }
+ if g.revs[0].main <= rev {
+ return &ki.generations[cg]
+ }
+ cg--
+ }
+ return nil
+}
+
+func (ki *keyIndex) Less(b btree.Item) bool {
+ return bytes.Compare(ki.key, b.(*keyIndex).key) == -1
+}
+
+func (ki *keyIndex) equal(b *keyIndex) bool {
+ if !bytes.Equal(ki.key, b.key) {
+ return false
+ }
+ if ki.modified != b.modified {
+ return false
+ }
+ if len(ki.generations) != len(b.generations) {
+ return false
+ }
+ for i := range ki.generations {
+ ag, bg := ki.generations[i], b.generations[i]
+ if !ag.equal(bg) {
+ return false
+ }
+ }
+ return true
+}
+
+func (ki *keyIndex) String() string {
+ var s string
+ for _, g := range ki.generations {
+ s += g.String()
+ }
+ return s
+}
+
+// generation contains multiple revisions of a key.
+type generation struct {
+ ver int64
+	created revision // when the generation is created (the revision of the first put).
+ revs []revision
+}
+
+func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 }
+
+// walk walks through the revisions in the generation in descending order,
+// passing each revision to the given function. walk stops when either it
+// finishes walking all revisions or the function returns false, and it
+// returns the position at which it stopped; -1 means it walked through all
+// revisions without being stopped.
+func (g *generation) walk(f func(rev revision) bool) int {
+ l := len(g.revs)
+ for i := range g.revs {
+ ok := f(g.revs[l-i-1])
+ if !ok {
+ return l - i - 1
+ }
+ }
+ return -1
+}
+
+func (g *generation) String() string {
+ return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs)
+}
+
+func (g generation) equal(b generation) bool {
+ if g.ver != b.ver {
+ return false
+ }
+ if len(g.revs) != len(b.revs) {
+ return false
+ }
+
+ for i := range g.revs {
+ ar, br := g.revs[i], b.revs[i]
+ if ar != br {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/kv.go b/vendor/go.etcd.io/etcd/mvcc/kv.go
new file mode 100644
index 000000000000..c057f9261183
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/kv.go
@@ -0,0 +1,150 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "go.etcd.io/etcd/lease"
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.etcd.io/etcd/mvcc/mvccpb"
+ "go.etcd.io/etcd/pkg/traceutil"
+)
+
+type RangeOptions struct {
+ Limit int64
+ Rev int64
+ Count bool
+}
+
+type RangeResult struct {
+ KVs []mvccpb.KeyValue
+ Rev int64
+ Count int
+}
+
+type ReadView interface {
+ // FirstRev returns the first KV revision at the time of opening the txn.
+ // After a compaction, the first revision increases to the compaction
+ // revision.
+ FirstRev() int64
+
+ // Rev returns the revision of the KV at the time of opening the txn.
+ Rev() int64
+
+ // Range gets the keys in the range at rangeRev.
+ // The returned rev is the current revision of the KV when the operation is executed.
+	// If rangeRev <= 0, range gets the keys at currentRev.
+ // If `end` is nil, the request returns the key.
+ // If `end` is not nil and not empty, it gets the keys in range [key, range_end).
+ // If `end` is not nil and empty, it gets the keys greater than or equal to key.
+ // Limit limits the number of keys returned.
+ // If the required rev is compacted, ErrCompacted will be returned.
+ Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error)
+}
+
+// TxnRead represents a read-only transaction with operations that will not
+// block other read transactions.
+type TxnRead interface {
+ ReadView
+	// End marks the transaction as complete and ready to commit.
+ End()
+}
+
+type WriteView interface {
+ // DeleteRange deletes the given range from the store.
+ // A deleteRange increases the rev of the store if any key in the range exists.
+	// The number of keys deleted will be returned.
+	// The returned rev is the current revision of the KV when the operation is executed.
+	// It also generates one event for each key deleted in the event history.
+	// If the `end` is nil, deleteRange deletes the key.
+	// If the `end` is not nil, deleteRange deletes the keys in range [key, range_end).
+ DeleteRange(key, end []byte) (n, rev int64)
+
+	// Put puts the given key, value into the store. Put also takes an additional
+	// argument, lease, to attach a lease to the key-value pair as metadata. The KV
+	// implementation does not validate the lease ID.
+ // A put also increases the rev of the store, and generates one event in the event history.
+ // The returned rev is the current revision of the KV when the operation is executed.
+ Put(key, value []byte, lease lease.LeaseID) (rev int64)
+}
+
+// TxnWrite represents a transaction that can modify the store.
+type TxnWrite interface {
+ TxnRead
+ WriteView
+ // Changes gets the changes made since opening the write txn.
+ Changes() []mvccpb.KeyValue
+}
+
+// txnReadWrite coerces a read txn to a write, panicking on any write operation.
+type txnReadWrite struct{ TxnRead }
+
+func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") }
+func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
+ panic("unexpected Put")
+}
+func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil }
+
+func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} }
+
+type KV interface {
+ ReadView
+ WriteView
+
+ // Read creates a read transaction.
+ Read(trace *traceutil.Trace) TxnRead
+
+ // Write creates a write transaction.
+ Write(trace *traceutil.Trace) TxnWrite
+
+ // Hash computes the hash of the KV's backend.
+ Hash() (hash uint32, revision int64, err error)
+
+ // HashByRev computes the hash of all MVCC revisions up to a given revision.
+ HashByRev(rev int64) (hash uint32, revision int64, compactRev int64, err error)
+
+ // Compact frees all superseded keys with revisions less than rev.
+ Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error)
+
+ // Commit commits outstanding txns into the underlying backend.
+ Commit()
+
+ // Restore restores the KV store from a backend.
+ Restore(b backend.Backend) error
+ Close() error
+}
+
+// WatchableKV is a KV that can be watched.
+type WatchableKV interface {
+ KV
+ Watchable
+}
+
+// Watchable is the interface that wraps the NewWatchStream function.
+type Watchable interface {
+	// NewWatchStream returns a WatchStream that can be used to
+	// watch events that happened or are happening on the KV.
+ NewWatchStream() WatchStream
+}
+
+// ConsistentWatchableKV is a WatchableKV that understands the consistency
+// algorithm and consistent index.
+// If the consistent index of the executing entry is not larger than the
+// consistent index of the ConsistentWatchableKV, all operations in
+// that entry are skipped and an empty response is returned.
+type ConsistentWatchableKV interface {
+ WatchableKV
+ // ConsistentIndex returns the current consistent index of the KV.
+ ConsistentIndex() uint64
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/kv_view.go b/vendor/go.etcd.io/etcd/mvcc/kv_view.go
new file mode 100644
index 000000000000..d4f0ca6880ac
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/kv_view.go
@@ -0,0 +1,54 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "go.etcd.io/etcd/lease"
+ "go.etcd.io/etcd/pkg/traceutil"
+)
+
+type readView struct{ kv KV }
+
+func (rv *readView) FirstRev() int64 {
+ tr := rv.kv.Read(traceutil.TODO())
+ defer tr.End()
+ return tr.FirstRev()
+}
+
+func (rv *readView) Rev() int64 {
+ tr := rv.kv.Read(traceutil.TODO())
+ defer tr.End()
+ return tr.Rev()
+}
+
+func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+ tr := rv.kv.Read(traceutil.TODO())
+ defer tr.End()
+ return tr.Range(key, end, ro)
+}
+
+type writeView struct{ kv KV }
+
+func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) {
+ tw := wv.kv.Write(traceutil.TODO())
+ defer tw.End()
+ return tw.DeleteRange(key, end)
+}
+
+func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
+ tw := wv.kv.Write(traceutil.TODO())
+ defer tw.End()
+ return tw.Put(key, value, lease)
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/kvstore.go b/vendor/go.etcd.io/etcd/mvcc/kvstore.go
new file mode 100644
index 000000000000..6752038083a6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/kvstore.go
@@ -0,0 +1,624 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.etcd.io/etcd/lease"
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.etcd.io/etcd/mvcc/mvccpb"
+ "go.etcd.io/etcd/pkg/schedule"
+ "go.etcd.io/etcd/pkg/traceutil"
+
+ "github.com/coreos/pkg/capnslog"
+ "go.uber.org/zap"
+)
+
+var (
+ keyBucketName = []byte("key")
+ metaBucketName = []byte("meta")
+
+ consistentIndexKeyName = []byte("consistent_index")
+ scheduledCompactKeyName = []byte("scheduledCompactRev")
+ finishedCompactKeyName = []byte("finishedCompactRev")
+
+ ErrCompacted = errors.New("mvcc: required revision has been compacted")
+ ErrFutureRev = errors.New("mvcc: required revision is a future revision")
+ ErrCanceled = errors.New("mvcc: watcher is canceled")
+ ErrClosed = errors.New("mvcc: closed")
+
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "mvcc")
+)
+
+const (
+	// markedRevBytesLen is the byte length of a marked revision.
+	// The first `revBytesLen` bytes represent a normal revision. The last
+	// byte is the mark.
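+	// With revBytesLen == 8+1+8, a marked revision is therefore 18 bytes,
+	// ending in the tombstone mark 't'.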
+ markedRevBytesLen = revBytesLen + 1
+ markBytePosition = markedRevBytesLen - 1
+ markTombstone byte = 't'
+)
+
+var restoreChunkKeys = 10000 // non-const for testing
+var defaultCompactBatchLimit = 1000
+
+// ConsistentIndexGetter is an interface that wraps the ConsistentIndex method.
+// Consistent index is the offset of an entry in a consistent replicated log.
+type ConsistentIndexGetter interface {
+ // ConsistentIndex returns the consistent index of current executing entry.
+ ConsistentIndex() uint64
+}
+
+type StoreConfig struct {
+ CompactionBatchLimit int
+}
+
+type store struct {
+ ReadView
+ WriteView
+
+ // consistentIndex caches the "consistent_index" key's value. Accessed
+ // through atomics so must be 64-bit aligned.
+ consistentIndex uint64
+
+ cfg StoreConfig
+
+ // mu read locks for txns and write locks for non-txn store changes.
+ mu sync.RWMutex
+
+ ig ConsistentIndexGetter
+
+ b backend.Backend
+ kvindex index
+
+ le lease.Lessor
+
+	// revMu protects currentRev and compactMainRev.
+	// Locked at the end of a write txn and released after the write txn's
+	// batch tx lock is released. Locked before locking a read txn and
+	// released after locking.
+ revMu sync.RWMutex
+ // currentRev is the revision of the last completed transaction.
+ currentRev int64
+ // compactMainRev is the main revision of the last compaction.
+ compactMainRev int64
+
+ // bytesBuf8 is a byte slice of length 8
+ // to avoid a repetitive allocation in saveIndex.
+ bytesBuf8 []byte
+
+ fifoSched schedule.Scheduler
+
+ stopc chan struct{}
+
+ lg *zap.Logger
+}
+
+// NewStore returns a new store. It is useful for creating a store inside
+// the mvcc package. Externally it should only be used for testing.
+func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter, cfg StoreConfig) *store {
+ if cfg.CompactionBatchLimit == 0 {
+ cfg.CompactionBatchLimit = defaultCompactBatchLimit
+ }
+ s := &store{
+ cfg: cfg,
+ b: b,
+ ig: ig,
+ kvindex: newTreeIndex(lg),
+
+ le: le,
+
+ currentRev: 1,
+ compactMainRev: -1,
+
+ bytesBuf8: make([]byte, 8),
+ fifoSched: schedule.NewFIFOScheduler(),
+
+ stopc: make(chan struct{}),
+
+ lg: lg,
+ }
+ s.ReadView = &readView{s}
+ s.WriteView = &writeView{s}
+ if s.le != nil {
+ s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) })
+ }
+
+ tx := s.b.BatchTx()
+ tx.Lock()
+ tx.UnsafeCreateBucket(keyBucketName)
+ tx.UnsafeCreateBucket(metaBucketName)
+ tx.Unlock()
+ s.b.ForceCommit()
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if err := s.restore(); err != nil {
+ // TODO: return the error instead of panic here?
+ panic("failed to recover store from backend")
+ }
+
+ return s
+}
+
+func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) {
+ if ctx == nil || ctx.Err() != nil {
+ select {
+ case <-s.stopc:
+ default:
+			// fix deadlock in mvcc; for more information, please refer to PR 11817.
+			// s.stopc is only updated in the restore operation, which is called by the
+			// apply-snapshot call; compaction and apply-snapshot requests are
+			// serialized by raft and do not happen at the same time.
+ s.mu.Lock()
+ f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
+ s.fifoSched.Schedule(f)
+ s.mu.Unlock()
+ }
+ return
+ }
+ close(ch)
+}
+
+func (s *store) Hash() (hash uint32, revision int64, err error) {
+ start := time.Now()
+
+ s.b.ForceCommit()
+ h, err := s.b.Hash(DefaultIgnores)
+
+ hashSec.Observe(time.Since(start).Seconds())
+ return h, s.currentRev, err
+}
+
+func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev int64, err error) {
+ start := time.Now()
+
+ s.mu.RLock()
+ s.revMu.RLock()
+ compactRev, currentRev = s.compactMainRev, s.currentRev
+ s.revMu.RUnlock()
+
+ if rev > 0 && rev <= compactRev {
+ s.mu.RUnlock()
+ return 0, 0, compactRev, ErrCompacted
+ } else if rev > 0 && rev > currentRev {
+ s.mu.RUnlock()
+ return 0, currentRev, 0, ErrFutureRev
+ }
+
+ if rev == 0 {
+ rev = currentRev
+ }
+ keep := s.kvindex.Keep(rev)
+
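+	// keep s.mu read-locked until the read tx is locked so that a concurrent
+	// Restore cannot swap the backend out from under us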
+ tx := s.b.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ s.mu.RUnlock()
+
+ upper := revision{main: rev + 1}
+ lower := revision{main: compactRev + 1}
+ h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+
+ h.Write(keyBucketName)
+ err = tx.UnsafeForEach(keyBucketName, func(k, v []byte) error {
+ kr := bytesToRev(k)
+ if !upper.GreaterThan(kr) {
+ return nil
+ }
+		// skip revisions that are scheduled for deletion
+		// due to compacting; don't skip anything if the keep set is empty.
+ if lower.GreaterThan(kr) && len(keep) > 0 {
+ if _, ok := keep[kr]; !ok {
+ return nil
+ }
+ }
+ h.Write(k)
+ h.Write(v)
+ return nil
+ })
+ hash = h.Sum32()
+
+ hashRevSec.Observe(time.Since(start).Seconds())
+ return hash, currentRev, compactRev, err
+}
+
+func (s *store) updateCompactRev(rev int64) (<-chan struct{}, error) {
+ s.revMu.Lock()
+ if rev <= s.compactMainRev {
+ ch := make(chan struct{})
+ f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
+ s.fifoSched.Schedule(f)
+ s.revMu.Unlock()
+ return ch, ErrCompacted
+ }
+ if rev > s.currentRev {
+ s.revMu.Unlock()
+ return nil, ErrFutureRev
+ }
+
+ s.compactMainRev = rev
+
+ rbytes := newRevBytes()
+ revToBytes(revision{main: rev}, rbytes)
+
+ tx := s.b.BatchTx()
+ tx.Lock()
+ tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes)
+ tx.Unlock()
+ // ensure that desired compaction is persisted
+ s.b.ForceCommit()
+
+ s.revMu.Unlock()
+
+ return nil, nil
+}
+
+func (s *store) compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) {
+ ch := make(chan struct{})
+ var j = func(ctx context.Context) {
+ if ctx.Err() != nil {
+ s.compactBarrier(ctx, ch)
+ return
+ }
+ start := time.Now()
+ keep := s.kvindex.Compact(rev)
+ indexCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond))
+ if !s.scheduleCompaction(rev, keep) {
+ s.compactBarrier(nil, ch)
+ return
+ }
+ close(ch)
+ }
+
+ s.fifoSched.Schedule(j)
+ trace.Step("schedule compaction")
+ return ch, nil
+}
+
+func (s *store) compactLockfree(rev int64) (<-chan struct{}, error) {
+ ch, err := s.updateCompactRev(rev)
+	if err != nil {
+ return ch, err
+ }
+
+ return s.compact(traceutil.TODO(), rev)
+}
+
+func (s *store) Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) {
+ s.mu.Lock()
+
+ ch, err := s.updateCompactRev(rev)
+ trace.Step("check and update compact revision")
+ if err != nil {
+ s.mu.Unlock()
+ return ch, err
+ }
+ s.mu.Unlock()
+
+ return s.compact(trace, rev)
+}
+
+// DefaultIgnores is a map of keys to ignore in hash checking.
+var DefaultIgnores map[backend.IgnoreKey]struct{}
+
+func init() {
+ DefaultIgnores = map[backend.IgnoreKey]struct{}{
+ // consistent index might be changed due to v2 internal sync, which
+ // is not controllable by the user.
+ {Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}: {},
+ }
+}
+
+func (s *store) Commit() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ tx := s.b.BatchTx()
+ tx.Lock()
+ s.saveIndex(tx)
+ tx.Unlock()
+ s.b.ForceCommit()
+}
+
+func (s *store) Restore(b backend.Backend) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ close(s.stopc)
+ s.fifoSched.Stop()
+
+ atomic.StoreUint64(&s.consistentIndex, 0)
+ s.b = b
+ s.kvindex = newTreeIndex(s.lg)
+ s.currentRev = 1
+ s.compactMainRev = -1
+ s.fifoSched = schedule.NewFIFOScheduler()
+ s.stopc = make(chan struct{})
+
+ return s.restore()
+}
+
+func (s *store) restore() error {
+ s.setupMetricsReporter()
+
+ min, max := newRevBytes(), newRevBytes()
+ revToBytes(revision{main: 1}, min)
+ revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max)
+
+ keyToLease := make(map[string]lease.LeaseID)
+
+ // restore index
+ tx := s.b.BatchTx()
+ tx.Lock()
+
+ _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0)
+ if len(finishedCompactBytes) != 0 {
+ s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main
+
+ if s.lg != nil {
+ s.lg.Info(
+ "restored last compact revision",
+ zap.String("meta-bucket-name", string(metaBucketName)),
+ zap.String("meta-bucket-name-key", string(finishedCompactKeyName)),
+ zap.Int64("restored-compact-revision", s.compactMainRev),
+ )
+ } else {
+ plog.Printf("restore compact to %d", s.compactMainRev)
+ }
+ }
+ _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0)
+ scheduledCompact := int64(0)
+ if len(scheduledCompactBytes) != 0 {
+ scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main
+ }
+
+ // index keys concurrently as they're loaded in from tx
+ keysGauge.Set(0)
+ rkvc, revc := restoreIntoIndex(s.lg, s.kvindex)
+ for {
+ keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys))
+ if len(keys) == 0 {
+ break
+ }
+ // rkvc blocks if the total pending keys exceeds the restore
+ // chunk size to keep keys from consuming too much memory.
+ restoreChunk(s.lg, rkvc, keys, vals, keyToLease)
+ if len(keys) < restoreChunkKeys {
+ // partial set implies final set
+ break
+ }
+ // next set begins after where this one ended
+ newMin := bytesToRev(keys[len(keys)-1][:revBytesLen])
+ newMin.sub++
+ revToBytes(newMin, min)
+ }
+ close(rkvc)
+ s.currentRev = <-revc
+
+	// keys in the range [compacted revision -N, compaction] might all be deleted due to compaction.
+	// The correct revision should be set to the compaction revision in that case,
+	// not the largest revision we have seen.
+ if s.currentRev < s.compactMainRev {
+ s.currentRev = s.compactMainRev
+ }
+ if scheduledCompact <= s.compactMainRev {
+ scheduledCompact = 0
+ }
+
+ for key, lid := range keyToLease {
+ if s.le == nil {
+ panic("no lessor to attach lease")
+ }
+ err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}})
+ if err != nil {
+ if s.lg != nil {
+ s.lg.Warn(
+ "failed to attach a lease",
+ zap.String("lease-id", fmt.Sprintf("%016x", lid)),
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("unexpected Attach error: %v", err)
+ }
+ }
+ }
+
+ tx.Unlock()
+
+ if scheduledCompact != 0 {
+ s.compactLockfree(scheduledCompact)
+
+ if s.lg != nil {
+ s.lg.Info(
+ "resume scheduled compaction",
+ zap.String("meta-bucket-name", string(metaBucketName)),
+ zap.String("meta-bucket-name-key", string(scheduledCompactKeyName)),
+ zap.Int64("scheduled-compact-revision", scheduledCompact),
+ )
+ } else {
+ plog.Printf("resume scheduled compaction at %d", scheduledCompact)
+ }
+ }
+
+ return nil
+}
+
+type revKeyValue struct {
+ key []byte
+ kv mvccpb.KeyValue
+ kstr string
+}
+
+func restoreIntoIndex(lg *zap.Logger, idx index) (chan<- revKeyValue, <-chan int64) {
+ rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1)
+ go func() {
+ currentRev := int64(1)
+ defer func() { revc <- currentRev }()
+ // restore the tree index from streaming the unordered index.
+ kiCache := make(map[string]*keyIndex, restoreChunkKeys)
+ for rkv := range rkvc {
+ ki, ok := kiCache[rkv.kstr]
+			// evict some kiCache entries if the cache is full but the key is still missing
+ if !ok && len(kiCache) >= restoreChunkKeys {
+ i := 10
+ for k := range kiCache {
+ delete(kiCache, k)
+ if i--; i == 0 {
+ break
+ }
+ }
+ }
+			// cache miss; fetch from the tree index if the key is there
+ if !ok {
+ ki = &keyIndex{key: rkv.kv.Key}
+ if idxKey := idx.KeyIndex(ki); idxKey != nil {
+ kiCache[rkv.kstr], ki = idxKey, idxKey
+ ok = true
+ }
+ }
+ rev := bytesToRev(rkv.key)
+ currentRev = rev.main
+ if ok {
+ if isTombstone(rkv.key) {
+ ki.tombstone(lg, rev.main, rev.sub)
+ continue
+ }
+ ki.put(lg, rev.main, rev.sub)
+ } else if !isTombstone(rkv.key) {
+ ki.restore(lg, revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version)
+ idx.Insert(ki)
+ kiCache[rkv.kstr] = ki
+ }
+ }
+ }()
+ return rkvc, revc
+}
+
+func restoreChunk(lg *zap.Logger, kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) {
+ for i, key := range keys {
+ rkv := revKeyValue{key: key}
+ if err := rkv.kv.Unmarshal(vals[i]); err != nil {
+ if lg != nil {
+ lg.Fatal("failed to unmarshal mvccpb.KeyValue", zap.Error(err))
+ } else {
+ plog.Fatalf("cannot unmarshal event: %v", err)
+ }
+ }
+ rkv.kstr = string(rkv.kv.Key)
+ if isTombstone(key) {
+ delete(keyToLease, rkv.kstr)
+ } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease {
+ keyToLease[rkv.kstr] = lid
+ } else {
+ delete(keyToLease, rkv.kstr)
+ }
+ kvc <- rkv
+ }
+}
+
+func (s *store) Close() error {
+ close(s.stopc)
+ s.fifoSched.Stop()
+ return nil
+}
+
+func (s *store) saveIndex(tx backend.BatchTx) {
+ if s.ig == nil {
+ return
+ }
+ bs := s.bytesBuf8
+ ci := s.ig.ConsistentIndex()
+ binary.BigEndian.PutUint64(bs, ci)
+ // put the index into the underlying backend
+ // tx has been locked in TxnBegin, so there is no need to lock it again
+ tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs)
+ atomic.StoreUint64(&s.consistentIndex, ci)
+}
+
+func (s *store) ConsistentIndex() uint64 {
+ if ci := atomic.LoadUint64(&s.consistentIndex); ci > 0 {
+ return ci
+ }
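+	// slow path: the cached value has not been set yet; read the index from
+	// the meta bucket and cache it for subsequent calls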
+ tx := s.b.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0)
+ if len(vs) == 0 {
+ return 0
+ }
+ v := binary.BigEndian.Uint64(vs[0])
+ atomic.StoreUint64(&s.consistentIndex, v)
+ return v
+}
+
+func (s *store) setupMetricsReporter() {
+ b := s.b
+ reportDbTotalSizeInBytesMu.Lock()
+ reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) }
+ reportDbTotalSizeInBytesMu.Unlock()
+ reportDbTotalSizeInBytesDebugMu.Lock()
+ reportDbTotalSizeInBytesDebug = func() float64 { return float64(b.Size()) }
+ reportDbTotalSizeInBytesDebugMu.Unlock()
+ reportDbTotalSizeInUseInBytesMu.Lock()
+ reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) }
+ reportDbTotalSizeInUseInBytesMu.Unlock()
+ reportDbOpenReadTxNMu.Lock()
+ reportDbOpenReadTxN = func() float64 { return float64(b.OpenReadTxN()) }
+ reportDbOpenReadTxNMu.Unlock()
+ reportCurrentRevMu.Lock()
+ reportCurrentRev = func() float64 {
+ s.revMu.RLock()
+ defer s.revMu.RUnlock()
+ return float64(s.currentRev)
+ }
+ reportCurrentRevMu.Unlock()
+ reportCompactRevMu.Lock()
+ reportCompactRev = func() float64 {
+ s.revMu.RLock()
+ defer s.revMu.RUnlock()
+ return float64(s.compactMainRev)
+ }
+ reportCompactRevMu.Unlock()
+}
+
+// appendMarkTombstone appends the tombstone mark to normal revision bytes.
+func appendMarkTombstone(lg *zap.Logger, b []byte) []byte {
+ if len(b) != revBytesLen {
+ if lg != nil {
+ lg.Panic(
+ "cannot append tombstone mark to non-normal revision bytes",
+ zap.Int("expected-revision-bytes-size", revBytesLen),
+ zap.Int("given-revision-bytes-size", len(b)),
+ )
+ } else {
+ plog.Panicf("cannot append mark to non normal revision bytes")
+ }
+ }
+ return append(b, markTombstone)
+}
+
+// isTombstone checks whether the given revision bytes represent a tombstone.
+func isTombstone(b []byte) bool {
+ return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/kvstore_compaction.go b/vendor/go.etcd.io/etcd/mvcc/kvstore_compaction.go
new file mode 100644
index 000000000000..4c6b062b433d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/kvstore_compaction.go
@@ -0,0 +1,80 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "encoding/binary"
+ "time"
+
+ "go.uber.org/zap"
+)
+
+func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool {
+ totalStart := time.Now()
+ defer func() { dbCompactionTotalMs.Observe(float64(time.Since(totalStart) / time.Millisecond)) }()
+ keyCompactions := 0
+ defer func() { dbCompactionKeysCounter.Add(float64(keyCompactions)) }()
+
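+	// end is the exclusive upper bound of each delete batch: every key with a
+	// main revision at or below compactMainRev is a compaction candidate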
+ end := make([]byte, 8)
+ binary.BigEndian.PutUint64(end, uint64(compactMainRev+1))
+
+ last := make([]byte, 8+1+8)
+ for {
+ var rev revision
+
+ start := time.Now()
+
+ tx := s.b.BatchTx()
+ tx.Lock()
+ keys, _ := tx.UnsafeRange(keyBucketName, last, end, int64(s.cfg.CompactionBatchLimit))
+ for _, key := range keys {
+ rev = bytesToRev(key)
+ if _, ok := keep[rev]; !ok {
+ tx.UnsafeDelete(keyBucketName, key)
+ keyCompactions++
+ }
+ }
+
+ if len(keys) < s.cfg.CompactionBatchLimit {
+ rbytes := make([]byte, 8+1+8)
+ revToBytes(revision{main: compactMainRev}, rbytes)
+ tx.UnsafePut(metaBucketName, finishedCompactKeyName, rbytes)
+ tx.Unlock()
+ if s.lg != nil {
+ s.lg.Info(
+ "finished scheduled compaction",
+ zap.Int64("compact-revision", compactMainRev),
+ zap.Duration("took", time.Since(totalStart)),
+ )
+ } else {
+ plog.Infof("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart))
+ }
+ return true
+ }
+
+ // update last
+ revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last)
+ tx.Unlock()
+ // Immediately commit the compaction deletes instead of letting them accumulate in the write buffer
+ s.b.ForceCommit()
+ dbCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond))
+
+ select {
+ case <-time.After(10 * time.Millisecond):
+ case <-s.stopc:
+ return false
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/kvstore_txn.go b/vendor/go.etcd.io/etcd/mvcc/kvstore_txn.go
new file mode 100644
index 000000000000..716a6d82ff2e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/kvstore_txn.go
@@ -0,0 +1,321 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "go.etcd.io/etcd/lease"
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.etcd.io/etcd/mvcc/mvccpb"
+ "go.etcd.io/etcd/pkg/traceutil"
+ "go.uber.org/zap"
+)
+
+type storeTxnRead struct {
+ s *store
+ tx backend.ReadTx
+
+ firstRev int64
+ rev int64
+
+ trace *traceutil.Trace
+}
+
+func (s *store) Read(trace *traceutil.Trace) TxnRead {
+ s.mu.RLock()
+ s.revMu.RLock()
+	// backend holds b.readTx.RLock() only when creating the concurrentReadTx. After
+	// the concurrentReadTx is created, it will not block write transactions.
+ tx := s.b.ConcurrentReadTx()
+ tx.RLock() // RLock is no-op. concurrentReadTx does not need to be locked after it is created.
+ firstRev, rev := s.compactMainRev, s.currentRev
+ s.revMu.RUnlock()
+ return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev, trace})
+}
+
+func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev }
+func (tr *storeTxnRead) Rev() int64 { return tr.rev }
+
+func (tr *storeTxnRead) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+ return tr.rangeKeys(key, end, tr.Rev(), ro)
+}
+
+func (tr *storeTxnRead) End() {
+ tr.tx.RUnlock() // RUnlock signals the end of concurrentReadTx.
+ tr.s.mu.RUnlock()
+}
+
+type storeTxnWrite struct {
+ storeTxnRead
+ tx backend.BatchTx
+ // beginRev is the revision where the txn begins; it will write to the next revision.
+ beginRev int64
+ changes []mvccpb.KeyValue
+}
+
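+// Write opens a write transaction; note that the backend batch tx lock
+// acquired here is held until End is called on the returned txn.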
+func (s *store) Write(trace *traceutil.Trace) TxnWrite {
+ s.mu.RLock()
+ tx := s.b.BatchTx()
+ tx.Lock()
+ tw := &storeTxnWrite{
+ storeTxnRead: storeTxnRead{s, tx, 0, 0, trace},
+ tx: tx,
+ beginRev: s.currentRev,
+ changes: make([]mvccpb.KeyValue, 0, 4),
+ }
+ return newMetricsTxnWrite(tw)
+}
+
+func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev }
+
+func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+ rev := tw.beginRev
+ if len(tw.changes) > 0 {
+ rev++
+ }
+ return tw.rangeKeys(key, end, rev, ro)
+}
+
+func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) {
+ if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 {
+ return n, tw.beginRev + 1
+ }
+ return 0, tw.beginRev
+}
+
+func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 {
+ tw.put(key, value, lease)
+ return tw.beginRev + 1
+}
+
+func (tw *storeTxnWrite) End() {
+ // only update index if the txn modifies the mvcc state.
+ if len(tw.changes) != 0 {
+ tw.s.saveIndex(tw.tx)
+ // hold revMu lock to prevent new read txns from opening until writeback.
+ tw.s.revMu.Lock()
+ tw.s.currentRev++
+ }
+ tw.tx.Unlock()
+ if len(tw.changes) != 0 {
+ tw.s.revMu.Unlock()
+ }
+ tw.s.mu.RUnlock()
+}
+
+func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) {
+ rev := ro.Rev
+ if rev > curRev {
+ return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev
+ }
+ if rev <= 0 {
+ rev = curRev
+ }
+ if rev < tr.s.compactMainRev {
+ return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted
+ }
+
+ revpairs := tr.s.kvindex.Revisions(key, end, rev)
+ tr.trace.Step("range keys from in-memory index tree")
+ if len(revpairs) == 0 {
+ return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil
+ }
+ if ro.Count {
+ return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil
+ }
+
+ limit := int(ro.Limit)
+ if limit <= 0 || limit > len(revpairs) {
+ limit = len(revpairs)
+ }
+
+ kvs := make([]mvccpb.KeyValue, limit)
+ revBytes := newRevBytes()
+ for i, revpair := range revpairs[:len(kvs)] {
+ revToBytes(revpair, revBytes)
+ _, vs := tr.tx.UnsafeRange(keyBucketName, revBytes, nil, 0)
+ if len(vs) != 1 {
+ if tr.s.lg != nil {
+ tr.s.lg.Fatal(
+ "range failed to find revision pair",
+ zap.Int64("revision-main", revpair.main),
+ zap.Int64("revision-sub", revpair.sub),
+ )
+ } else {
+ plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub)
+ }
+ }
+ if err := kvs[i].Unmarshal(vs[0]); err != nil {
+ if tr.s.lg != nil {
+ tr.s.lg.Fatal(
+ "failed to unmarshal mvccpb.KeyValue",
+ zap.Error(err),
+ )
+ } else {
+ plog.Fatalf("cannot unmarshal event: %v", err)
+ }
+ }
+ }
+ tr.trace.Step("range keys from bolt db")
+ return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil
+}
+
+func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
+ rev := tw.beginRev + 1
+ c := rev
+ oldLease := lease.NoLease
+
+	// if the key existed before, use its previous created revision and
+	// get its previous leaseID
+ _, created, ver, err := tw.s.kvindex.Get(key, rev)
+ if err == nil {
+ c = created.main
+ oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)})
+ }
+ tw.trace.Step("get key's previous created_revision and leaseID")
+ ibytes := newRevBytes()
+ idxRev := revision{main: rev, sub: int64(len(tw.changes))}
+ revToBytes(idxRev, ibytes)
+
+ ver = ver + 1
+ kv := mvccpb.KeyValue{
+ Key: key,
+ Value: value,
+ CreateRevision: c,
+ ModRevision: rev,
+ Version: ver,
+ Lease: int64(leaseID),
+ }
+
+ d, err := kv.Marshal()
+ if err != nil {
+ if tw.storeTxnRead.s.lg != nil {
+ tw.storeTxnRead.s.lg.Fatal(
+ "failed to marshal mvccpb.KeyValue",
+ zap.Error(err),
+ )
+ } else {
+ plog.Fatalf("cannot marshal event: %v", err)
+ }
+ }
+
+ tw.trace.Step("marshal mvccpb.KeyValue")
+ tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d)
+ tw.s.kvindex.Put(key, idxRev)
+ tw.changes = append(tw.changes, kv)
+ tw.trace.Step("store kv pair into bolt db")
+
+ if oldLease != lease.NoLease {
+ if tw.s.le == nil {
+ panic("no lessor to detach lease")
+ }
+ err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}})
+ if err != nil {
+ if tw.storeTxnRead.s.lg != nil {
+ tw.storeTxnRead.s.lg.Fatal(
+ "failed to detach old lease from a key",
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("unexpected error from lease detach: %v", err)
+ }
+ }
+ }
+ if leaseID != lease.NoLease {
+ if tw.s.le == nil {
+ panic("no lessor to attach lease")
+ }
+ err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}})
+ if err != nil {
+ panic("unexpected error from lease Attach")
+ }
+ }
+ tw.trace.Step("attach lease to kv pair")
+}
+
+func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 {
+ rrev := tw.beginRev
+ if len(tw.changes) > 0 {
+ rrev++
+ }
+ keys, _ := tw.s.kvindex.Range(key, end, rrev)
+ if len(keys) == 0 {
+ return 0
+ }
+ for _, key := range keys {
+ tw.delete(key)
+ }
+ return int64(len(keys))
+}
+
+func (tw *storeTxnWrite) delete(key []byte) {
+ ibytes := newRevBytes()
+ idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))}
+ revToBytes(idxRev, ibytes)
+
+ if tw.storeTxnRead.s != nil && tw.storeTxnRead.s.lg != nil {
+ ibytes = appendMarkTombstone(tw.storeTxnRead.s.lg, ibytes)
+ } else {
+ // TODO: remove this in v3.5
+ ibytes = appendMarkTombstone(nil, ibytes)
+ }
+
+ kv := mvccpb.KeyValue{Key: key}
+
+ d, err := kv.Marshal()
+ if err != nil {
+ if tw.storeTxnRead.s.lg != nil {
+ tw.storeTxnRead.s.lg.Fatal(
+ "failed to marshal mvccpb.KeyValue",
+ zap.Error(err),
+ )
+ } else {
+ plog.Fatalf("cannot marshal event: %v", err)
+ }
+ }
+
+ tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d)
+ err = tw.s.kvindex.Tombstone(key, idxRev)
+ if err != nil {
+ if tw.storeTxnRead.s.lg != nil {
+ tw.storeTxnRead.s.lg.Fatal(
+ "failed to tombstone an existing key",
+ zap.String("key", string(key)),
+ zap.Error(err),
+ )
+ } else {
+ plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err)
+ }
+ }
+ tw.changes = append(tw.changes, kv)
+
+ item := lease.LeaseItem{Key: string(key)}
+ leaseID := tw.s.le.GetLease(item)
+
+ if leaseID != lease.NoLease {
+ err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item})
+ if err != nil {
+ if tw.storeTxnRead.s.lg != nil {
+ tw.storeTxnRead.s.lg.Fatal(
+ "failed to detach old lease from a key",
+ zap.Error(err),
+ )
+ } else {
+ plog.Errorf("cannot detach %v", err)
+ }
+ }
+ }
+}
+
+func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes }
diff --git a/vendor/go.etcd.io/etcd/mvcc/metrics.go b/vendor/go.etcd.io/etcd/mvcc/metrics.go
new file mode 100644
index 000000000000..42932c40d348
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/metrics.go
@@ -0,0 +1,345 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ rangeCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "range_total",
+ Help: "Total number of ranges seen by this member.",
+ })
+ rangeCounterDebug = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "range_total",
+ Help: "Total number of ranges seen by this member.",
+ })
+
+ putCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "put_total",
+ Help: "Total number of puts seen by this member.",
+ })
+ // TODO: remove in 3.5 release
+ putCounterDebug = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "put_total",
+ Help: "Total number of puts seen by this member.",
+ })
+
+ deleteCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "delete_total",
+ Help: "Total number of deletes seen by this member.",
+ })
+ // TODO: remove in 3.5 release
+ deleteCounterDebug = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "delete_total",
+ Help: "Total number of deletes seen by this member.",
+ })
+
+ txnCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "txn_total",
+ Help: "Total number of txns seen by this member.",
+ })
+ txnCounterDebug = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "txn_total",
+ Help: "Total number of txns seen by this member.",
+ })
+
+ keysGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "keys_total",
+ Help: "Total number of keys.",
+ })
+
+ watchStreamGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "watch_stream_total",
+ Help: "Total number of watch streams.",
+ })
+
+ watcherGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "watcher_total",
+ Help: "Total number of watchers.",
+ })
+
+ slowWatcherGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "slow_watcher_total",
+ Help: "Total number of unsynced slow watchers.",
+ })
+
+ totalEventsCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "events_total",
+ Help: "Total number of events sent by this member.",
+ })
+
+ pendingEventsGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "pending_events_total",
+ Help: "Total number of pending events to be sent.",
+ })
+
+ indexCompactionPauseMs = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "index_compaction_pause_duration_milliseconds",
+ Help: "Bucketed histogram of index compaction pause duration.",
+
+ // lowest bucket start of upper bound 0.5 ms with factor 2
+ // highest bucket start of 0.5 ms * 2^13 == 4.096 sec
+ Buckets: prometheus.ExponentialBuckets(0.5, 2, 14),
+ })
+
+ dbCompactionPauseMs = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "db_compaction_pause_duration_milliseconds",
+ Help: "Bucketed histogram of db compaction pause duration.",
+
+ // lowest bucket start of upper bound 1 ms with factor 2
+ // highest bucket start of 1 ms * 2^12 == 4.096 sec
+ Buckets: prometheus.ExponentialBuckets(1, 2, 13),
+ })
+
+ dbCompactionTotalMs = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "db_compaction_total_duration_milliseconds",
+ Help: "Bucketed histogram of db compaction total duration.",
+
+ // lowest bucket start of upper bound 100 ms with factor 2
+ // highest bucket start of 100 ms * 2^13 == 819.2 sec
+ Buckets: prometheus.ExponentialBuckets(100, 2, 14),
+ })
+
+ dbCompactionKeysCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "db_compaction_keys_total",
+ Help: "Total number of db keys compacted.",
+ })
+
+ dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "db_total_size_in_bytes",
+ Help: "Total size of the underlying database physically allocated in bytes.",
+ },
+ func() float64 {
+ reportDbTotalSizeInBytesMu.RLock()
+ defer reportDbTotalSizeInBytesMu.RUnlock()
+ return reportDbTotalSizeInBytes()
+ },
+ )
+ // overridden by mvcc initialization
+ reportDbTotalSizeInBytesMu sync.RWMutex
+ reportDbTotalSizeInBytes = func() float64 { return 0 }
+
+ // TODO: remove this in v3.5
+ dbTotalSizeDebug = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "db_total_size_in_bytes",
+ Help: "Total size of the underlying database physically allocated in bytes.",
+ },
+ func() float64 {
+ reportDbTotalSizeInBytesDebugMu.RLock()
+ defer reportDbTotalSizeInBytesDebugMu.RUnlock()
+ return reportDbTotalSizeInBytesDebug()
+ },
+ )
+ // overridden by mvcc initialization
+ reportDbTotalSizeInBytesDebugMu sync.RWMutex
+ reportDbTotalSizeInBytesDebug = func() float64 { return 0 }
+
+ dbTotalSizeInUse = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "db_total_size_in_use_in_bytes",
+ Help: "Total size of the underlying database logically in use in bytes.",
+ },
+ func() float64 {
+ reportDbTotalSizeInUseInBytesMu.RLock()
+ defer reportDbTotalSizeInUseInBytesMu.RUnlock()
+ return reportDbTotalSizeInUseInBytes()
+ },
+ )
+ // overridden by mvcc initialization
+ reportDbTotalSizeInUseInBytesMu sync.RWMutex
+ reportDbTotalSizeInUseInBytes = func() float64 { return 0 }
+
+ dbOpenReadTxN = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "db_open_read_transactions",
+ Help: "The number of currently open read transactions",
+ },
+
+ func() float64 {
+ reportDbOpenReadTxNMu.RLock()
+ defer reportDbOpenReadTxNMu.RUnlock()
+ return reportDbOpenReadTxN()
+ },
+ )
+ // overridden by mvcc initialization
+ reportDbOpenReadTxNMu sync.RWMutex
+ reportDbOpenReadTxN = func() float64 { return 0 }
+
+ hashSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "hash_duration_seconds",
+ Help: "The latency distribution of storage hash operation.",
+
+ // 100 MB usually takes 100 ms, so start with 10 MB of 10 ms
+ // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+ // highest bucket start of 0.01 sec * 2^14 == 163.84 sec
+ Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
+ })
+
+ hashRevSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "hash_rev_duration_seconds",
+ Help: "The latency distribution of storage hash by revision operation.",
+
+ // 100 MB usually takes 100 ms, so start with 10 MB of 10 ms
+ // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+ // highest bucket start of 0.01 sec * 2^14 == 163.84 sec
+ Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
+ })
+
+ currentRev = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "current_revision",
+ Help: "The current revision of store.",
+ },
+ func() float64 {
+ reportCurrentRevMu.RLock()
+ defer reportCurrentRevMu.RUnlock()
+ return reportCurrentRev()
+ },
+ )
+ // overridden by mvcc initialization
+ reportCurrentRevMu sync.RWMutex
+ reportCurrentRev = func() float64 { return 0 }
+
+ compactRev = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "compact_revision",
+ Help: "The revision of the last compaction in store.",
+ },
+ func() float64 {
+ reportCompactRevMu.RLock()
+ defer reportCompactRevMu.RUnlock()
+ return reportCompactRev()
+ },
+ )
+ // overridden by mvcc initialization
+ reportCompactRevMu sync.RWMutex
+ reportCompactRev = func() float64 { return 0 }
+
+ totalPutSizeGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "total_put_size_in_bytes",
+ Help: "The total size of put kv pairs seen by this member.",
+ })
+)
+
+func init() {
+ prometheus.MustRegister(rangeCounter)
+ prometheus.MustRegister(rangeCounterDebug)
+ prometheus.MustRegister(putCounter)
+ prometheus.MustRegister(putCounterDebug)
+ prometheus.MustRegister(deleteCounter)
+ prometheus.MustRegister(deleteCounterDebug)
+ prometheus.MustRegister(txnCounter)
+ prometheus.MustRegister(txnCounterDebug)
+ prometheus.MustRegister(keysGauge)
+ prometheus.MustRegister(watchStreamGauge)
+ prometheus.MustRegister(watcherGauge)
+ prometheus.MustRegister(slowWatcherGauge)
+ prometheus.MustRegister(totalEventsCounter)
+ prometheus.MustRegister(pendingEventsGauge)
+ prometheus.MustRegister(indexCompactionPauseMs)
+ prometheus.MustRegister(dbCompactionPauseMs)
+ prometheus.MustRegister(dbCompactionTotalMs)
+ prometheus.MustRegister(dbCompactionKeysCounter)
+ prometheus.MustRegister(dbTotalSize)
+ prometheus.MustRegister(dbTotalSizeDebug)
+ prometheus.MustRegister(dbTotalSizeInUse)
+ prometheus.MustRegister(dbOpenReadTxN)
+ prometheus.MustRegister(hashSec)
+ prometheus.MustRegister(hashRevSec)
+ prometheus.MustRegister(currentRev)
+ prometheus.MustRegister(compactRev)
+ prometheus.MustRegister(totalPutSizeGauge)
+}
+
+// ReportEventReceived reports that an event is received.
+// This function should be called when an external system receives an
+// event from mvcc.Watcher.
+func ReportEventReceived(n int) {
+ pendingEventsGauge.Sub(float64(n))
+ totalEventsCounter.Add(float64(n))
+}
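+
+// Illustrative pairing (a sketch, not upstream documentation): the event
+// sender in watchable_store.go adds to pendingEventsGauge for each batch it
+// queues on a watch channel, and the consumer calls ReportEventReceived once
+// it has drained those events, e.g. ReportEventReceived(len(resp.Events)).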
diff --git a/vendor/go.etcd.io/etcd/mvcc/metrics_txn.go b/vendor/go.etcd.io/etcd/mvcc/metrics_txn.go
new file mode 100644
index 000000000000..17f1b31caf7d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/metrics_txn.go
@@ -0,0 +1,71 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import "go.etcd.io/etcd/lease"
+
+type metricsTxnWrite struct {
+ TxnWrite
+ ranges uint
+ puts uint
+ deletes uint
+ putSize int64
+}
+
+func newMetricsTxnRead(tr TxnRead) TxnRead {
+ return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0, 0}
+}
+
+func newMetricsTxnWrite(tw TxnWrite) TxnWrite {
+ return &metricsTxnWrite{tw, 0, 0, 0, 0}
+}
+
+func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) {
+ tw.ranges++
+ return tw.TxnWrite.Range(key, end, ro)
+}
+
+func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) {
+ tw.deletes++
+ return tw.TxnWrite.DeleteRange(key, end)
+}
+
+func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
+ tw.puts++
+ size := int64(len(key) + len(value))
+ tw.putSize += size
+ return tw.TxnWrite.Put(key, value, lease)
+}
+
+func (tw *metricsTxnWrite) End() {
+ defer tw.TxnWrite.End()
+ if sum := tw.ranges + tw.puts + tw.deletes; sum > 1 {
+ txnCounter.Inc()
+ txnCounterDebug.Inc() // TODO: remove in 3.5 release
+ }
+
+ ranges := float64(tw.ranges)
+ rangeCounter.Add(ranges)
+ rangeCounterDebug.Add(ranges) // TODO: remove in 3.5 release
+
+ puts := float64(tw.puts)
+ putCounter.Add(puts)
+ putCounterDebug.Add(puts) // TODO: remove in 3.5 release
+ totalPutSizeGauge.Add(float64(tw.putSize))
+
+ deletes := float64(tw.deletes)
+ deleteCounter.Add(deletes)
+ deleteCounterDebug.Add(deletes) // TODO: remove in 3.5 release
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/revision.go b/vendor/go.etcd.io/etcd/mvcc/revision.go
new file mode 100644
index 000000000000..d6213866f26e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/revision.go
@@ -0,0 +1,67 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import "encoding/binary"
+
+// revBytesLen is the byte length of a normal revision.
+// The first 8 bytes are the revision.main in big-endian format. The 9th byte
+// is a '_'. The last 8 bytes are the revision.sub in big-endian format.
+const revBytesLen = 8 + 1 + 8
+
+// A revision indicates modification of the key-value space.
+// The set of changes that share the same main revision changes the key-value space atomically.
+type revision struct {
+ // main is the main revision of a set of changes that happen atomically.
+ main int64
+
+ // sub is the sub revision of a change in a set of changes that happen
+ // atomically. Each change has different increasing sub revision in that
+ // set.
+ sub int64
+}
+
+func (a revision) GreaterThan(b revision) bool {
+ if a.main > b.main {
+ return true
+ }
+ if a.main < b.main {
+ return false
+ }
+ return a.sub > b.sub
+}
+
+func newRevBytes() []byte {
+ return make([]byte, revBytesLen, markedRevBytesLen)
+}
+
+func revToBytes(rev revision, bytes []byte) {
+ binary.BigEndian.PutUint64(bytes, uint64(rev.main))
+ bytes[8] = '_'
+ binary.BigEndian.PutUint64(bytes[9:], uint64(rev.sub))
+}
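+
+// For example (illustrative), revToBytes(revision{main: 3, sub: 1}, b)
+// fills b with 00 00 00 00 00 00 00 03 '_' 00 00 00 00 00 00 00 01,
+// 17 bytes in total, so byte-wise key comparison follows revision order.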
+
+func bytesToRev(bytes []byte) revision {
+ return revision{
+ main: int64(binary.BigEndian.Uint64(bytes[0:8])),
+ sub: int64(binary.BigEndian.Uint64(bytes[9:])),
+ }
+}
+
+type revisions []revision
+
+func (a revisions) Len() int { return len(a) }
+func (a revisions) Less(i, j int) bool { return a[j].GreaterThan(a[i]) }
+func (a revisions) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
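+
+// Note: Less delegates to GreaterThan with its arguments swapped, so sorting
+// a revisions slice with sort.Sort yields ascending (main, sub) order.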
diff --git a/vendor/go.etcd.io/etcd/mvcc/util.go b/vendor/go.etcd.io/etcd/mvcc/util.go
new file mode 100644
index 000000000000..032621aedd93
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/util.go
@@ -0,0 +1,57 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.etcd.io/etcd/mvcc/mvccpb"
+)
+
+func UpdateConsistentIndex(be backend.Backend, index uint64) {
+ tx := be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ var oldi uint64
+ _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0)
+ if len(vs) != 0 {
+ oldi = binary.BigEndian.Uint64(vs[0])
+ }
+
+ if index <= oldi {
+ return
+ }
+
+ bs := make([]byte, 8)
+ binary.BigEndian.PutUint64(bs, index)
+ tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs)
+}
+
+func WriteKV(be backend.Backend, kv mvccpb.KeyValue) {
+ ibytes := newRevBytes()
+ revToBytes(revision{main: kv.ModRevision}, ibytes)
+
+ d, err := kv.Marshal()
+ if err != nil {
+ panic(fmt.Errorf("cannot marshal event: %v", err))
+ }
+
+ be.BatchTx().Lock()
+ be.BatchTx().UnsafePut(keyBucketName, ibytes, d)
+ be.BatchTx().Unlock()
+}
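+
+// Note: WriteKV stores the KeyValue directly at revision{main: kv.ModRevision}
+// using the same key encoding as revToBytes, and it does not touch the
+// in-memory index; the index must be rebuilt (e.g. on store restore) before
+// such keys become visible to ranges.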
diff --git a/vendor/go.etcd.io/etcd/mvcc/watchable_store.go b/vendor/go.etcd.io/etcd/mvcc/watchable_store.go
new file mode 100644
index 000000000000..72c6b8be4bac
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/watchable_store.go
@@ -0,0 +1,558 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/auth"
+ "go.etcd.io/etcd/lease"
+ "go.etcd.io/etcd/mvcc/backend"
+ "go.etcd.io/etcd/mvcc/mvccpb"
+ "go.etcd.io/etcd/pkg/traceutil"
+ "go.uber.org/zap"
+)
+
+// non-const so modifiable by tests
+var (
+ // chanBufLen is the length of the buffered chan
+ // for sending out watched events.
+ // TODO: find a good buf value. 1024 is just a random one that
+ // seems to be reasonable.
+ chanBufLen = 1024
+
+ // maxWatchersPerSync is the number of watchers to sync in a single batch
+ maxWatchersPerSync = 512
+)
+
+type watchable interface {
+ watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc)
+ progress(w *watcher)
+ rev() int64
+}
+
+type watchableStore struct {
+ *store
+
+ // mu protects watcher groups and batches. It should never be locked
+ // before locking store.mu to avoid deadlock.
+ mu sync.RWMutex
+
+ // victims are watcher batches that were blocked on the watch channel
+ victims []watcherBatch
+ victimc chan struct{}
+
+ // contains all unsynced watchers that need to sync with events that have happened
+ unsynced watcherGroup
+
+ // contains all synced watchers that are in sync with the progress of the store.
+ // The key of the map is the key that the watcher watches on.
+ synced watcherGroup
+
+ stopc chan struct{}
+ wg sync.WaitGroup
+}
+
+// cancelFunc updates unsynced and synced maps when running
+// cancel operations.
+type cancelFunc func()
+
+func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, as auth.AuthStore, ig ConsistentIndexGetter, cfg StoreConfig) ConsistentWatchableKV {
+ return newWatchableStore(lg, b, le, as, ig, cfg)
+}
+
+func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, as auth.AuthStore, ig ConsistentIndexGetter, cfg StoreConfig) *watchableStore {
+ s := &watchableStore{
+ store: NewStore(lg, b, le, ig, cfg),
+ victimc: make(chan struct{}, 1),
+ unsynced: newWatcherGroup(),
+ synced: newWatcherGroup(),
+ stopc: make(chan struct{}),
+ }
+ s.store.ReadView = &readView{s}
+ s.store.WriteView = &writeView{s}
+ if s.le != nil {
+ // use this store as the deleter so revokes trigger watch events
+ s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) })
+ }
+ if as != nil {
+ // TODO: encapsulating consistentindex into a separate package
+ as.SetConsistentIndexSyncer(s.store.saveIndex)
+ }
+ s.wg.Add(2)
+ go s.syncWatchersLoop()
+ go s.syncVictimsLoop()
+ return s
+}
+
+func (s *watchableStore) Close() error {
+ close(s.stopc)
+ s.wg.Wait()
+ return s.store.Close()
+}
+
+func (s *watchableStore) NewWatchStream() WatchStream {
+ watchStreamGauge.Inc()
+ return &watchStream{
+ watchable: s,
+ ch: make(chan WatchResponse, chanBufLen),
+ cancels: make(map[WatchID]cancelFunc),
+ watchers: make(map[WatchID]*watcher),
+ }
+}
+
+func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) {
+ wa := &watcher{
+ key: key,
+ end: end,
+ minRev: startRev,
+ id: id,
+ ch: ch,
+ fcs: fcs,
+ }
+
+ s.mu.Lock()
+ s.revMu.RLock()
+ synced := startRev > s.store.currentRev || startRev == 0
+ if synced {
+ wa.minRev = s.store.currentRev + 1
+ if startRev > wa.minRev {
+ wa.minRev = startRev
+ }
+ }
+ if synced {
+ s.synced.add(wa)
+ } else {
+ slowWatcherGauge.Inc()
+ s.unsynced.add(wa)
+ }
+ s.revMu.RUnlock()
+ s.mu.Unlock()
+
+ watcherGauge.Inc()
+
+ return wa, func() { s.cancelWatcher(wa) }
+}
+
+// cancelWatcher removes references to the watcher from the watchableStore
+func (s *watchableStore) cancelWatcher(wa *watcher) {
+ for {
+ s.mu.Lock()
+ if s.unsynced.delete(wa) {
+ slowWatcherGauge.Dec()
+ break
+ } else if s.synced.delete(wa) {
+ break
+ } else if wa.compacted {
+ break
+ } else if wa.ch == nil {
+ // already canceled (e.g., cancel/close race)
+ break
+ }
+
+ if !wa.victim {
+ panic("watcher not victim but not in watch groups")
+ }
+
+ var victimBatch watcherBatch
+ for _, wb := range s.victims {
+ if wb[wa] != nil {
+ victimBatch = wb
+ break
+ }
+ }
+ if victimBatch != nil {
+ slowWatcherGauge.Dec()
+ delete(victimBatch, wa)
+ break
+ }
+
+ // victim being processed so not accessible; retry
+ s.mu.Unlock()
+ time.Sleep(time.Millisecond)
+ }
+
+ watcherGauge.Dec()
+ wa.ch = nil
+ s.mu.Unlock()
+}
+
+func (s *watchableStore) Restore(b backend.Backend) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ err := s.store.Restore(b)
+ if err != nil {
+ return err
+ }
+
+ for wa := range s.synced.watchers {
+ wa.restore = true
+ s.unsynced.add(wa)
+ }
+ s.synced = newWatcherGroup()
+ return nil
+}
+
+// syncWatchersLoop syncs the watchers in the unsynced map every 100ms.
+func (s *watchableStore) syncWatchersLoop() {
+ defer s.wg.Done()
+
+ for {
+ s.mu.RLock()
+ st := time.Now()
+ lastUnsyncedWatchers := s.unsynced.size()
+ s.mu.RUnlock()
+
+ unsyncedWatchers := 0
+ if lastUnsyncedWatchers > 0 {
+ unsyncedWatchers = s.syncWatchers()
+ }
+ syncDuration := time.Since(st)
+
+ waitDuration := 100 * time.Millisecond
+ // more work pending?
+ if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers {
+ // be fair to other store operations by yielding time taken
+ waitDuration = syncDuration
+ }
+
+ select {
+ case <-time.After(waitDuration):
+ case <-s.stopc:
+ return
+ }
+ }
+}
+
+// syncVictimsLoop tries to write precomputed watcher responses to
+// watchers that had a blocked watcher channel
+func (s *watchableStore) syncVictimsLoop() {
+ defer s.wg.Done()
+
+ for {
+ for s.moveVictims() != 0 {
+ // try to update all victim watchers
+ }
+ s.mu.RLock()
+ isEmpty := len(s.victims) == 0
+ s.mu.RUnlock()
+
+ var tickc <-chan time.Time
+ if !isEmpty {
+ tickc = time.After(10 * time.Millisecond)
+ }
+
+ select {
+ case <-tickc:
+ case <-s.victimc:
+ case <-s.stopc:
+ return
+ }
+ }
+}
+
+// moveVictims tries to update watches with already pending event data
+func (s *watchableStore) moveVictims() (moved int) {
+ s.mu.Lock()
+ victims := s.victims
+ s.victims = nil
+ s.mu.Unlock()
+
+ var newVictim watcherBatch
+ for _, wb := range victims {
+ // try to send responses again
+ for w, eb := range wb {
+ // watcher has observed the store up to, but not including, w.minRev
+ rev := w.minRev - 1
+ if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
+ pendingEventsGauge.Add(float64(len(eb.evs)))
+ } else {
+ if newVictim == nil {
+ newVictim = make(watcherBatch)
+ }
+ newVictim[w] = eb
+ continue
+ }
+ moved++
+ }
+
+ // assign completed victim watchers to unsync/sync
+ s.mu.Lock()
+ s.store.revMu.RLock()
+ curRev := s.store.currentRev
+ for w, eb := range wb {
+ if newVictim != nil && newVictim[w] != nil {
+ // couldn't send watch response; stays victim
+ continue
+ }
+ w.victim = false
+ if eb.moreRev != 0 {
+ w.minRev = eb.moreRev
+ }
+ if w.minRev <= curRev {
+ s.unsynced.add(w)
+ } else {
+ slowWatcherGauge.Dec()
+ s.synced.add(w)
+ }
+ }
+ s.store.revMu.RUnlock()
+ s.mu.Unlock()
+ }
+
+ if len(newVictim) > 0 {
+ s.mu.Lock()
+ s.victims = append(s.victims, newVictim)
+ s.mu.Unlock()
+ }
+
+ return moved
+}
+
+// syncWatchers syncs unsynced watchers by:
+// 1. choose a set of watchers from the unsynced watcher group
+// 2. iterate over the set to get the minimum revision and remove compacted watchers
+// 3. use minimum revision to get all key-value pairs and send those events to watchers
+// 4. remove synced watchers in set from unsynced group and move to synced group
+func (s *watchableStore) syncWatchers() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.unsynced.size() == 0 {
+ return 0
+ }
+
+ s.store.revMu.RLock()
+ defer s.store.revMu.RUnlock()
+
+ // to find the key-value pairs for the unsynced watchers, we need the
+ // minimum watch revision; revisions from there up to the current revision
+ // are used to query the backend store for key-value pairs
+ curRev := s.store.currentRev
+ compactionRev := s.store.compactMainRev
+
+ wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev)
+ minBytes, maxBytes := newRevBytes(), newRevBytes()
+ revToBytes(revision{main: minRev}, minBytes)
+ revToBytes(revision{main: curRev + 1}, maxBytes)
+
+ // UnsafeRange returns keys and values. In boltdb, the keys are revisions
+ // and the values are the actual key-value pairs in the backend.
+ tx := s.store.b.ReadTx()
+ tx.RLock()
+ revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0)
+ var evs []mvccpb.Event
+ if s.store != nil && s.store.lg != nil {
+ evs = kvsToEvents(s.store.lg, wg, revs, vs)
+ } else {
+ // TODO: remove this in v3.5
+ evs = kvsToEvents(nil, wg, revs, vs)
+ }
+ tx.RUnlock()
+
+ var victims watcherBatch
+ wb := newWatcherBatch(wg, evs)
+ for w := range wg.watchers {
+ w.minRev = curRev + 1
+
+ eb, ok := wb[w]
+ if !ok {
+ // bring un-notified watcher to synced
+ s.synced.add(w)
+ s.unsynced.delete(w)
+ continue
+ }
+
+ if eb.moreRev != 0 {
+ w.minRev = eb.moreRev
+ }
+
+ if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) {
+ pendingEventsGauge.Add(float64(len(eb.evs)))
+ } else {
+ if victims == nil {
+ victims = make(watcherBatch)
+ }
+ w.victim = true
+ }
+
+ if w.victim {
+ victims[w] = eb
+ } else {
+ if eb.moreRev != 0 {
+ // stay unsynced; more to read
+ continue
+ }
+ s.synced.add(w)
+ }
+ s.unsynced.delete(w)
+ }
+ s.addVictim(victims)
+
+ vsz := 0
+ for _, v := range s.victims {
+ vsz += len(v)
+ }
+ slowWatcherGauge.Set(float64(s.unsynced.size() + vsz))
+
+ return s.unsynced.size()
+}
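+
+// The count returned above (watchers still unsynced) feeds back into
+// syncWatchersLoop, which matches its wait time to the duration of the last
+// pass while progress is still being made.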
+
+// kvsToEvents gets all events for the watchers from all key-value pairs
+func kvsToEvents(lg *zap.Logger, wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) {
+ for i, v := range vals {
+ var kv mvccpb.KeyValue
+ if err := kv.Unmarshal(v); err != nil {
+ if lg != nil {
+ lg.Panic("failed to unmarshal mvccpb.KeyValue", zap.Error(err))
+ } else {
+ plog.Panicf("cannot unmarshal event: %v", err)
+ }
+ }
+
+ if !wg.contains(string(kv.Key)) {
+ continue
+ }
+
+ ty := mvccpb.PUT
+ if isTombstone(revs[i]) {
+ ty = mvccpb.DELETE
+ // patch in mod revision so watchers won't skip
+ kv.ModRevision = bytesToRev(revs[i]).main
+ }
+ evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty})
+ }
+ return evs
+}
+
+// notify notifies the fact that given event at the given rev just happened to
+// watchers that watch on the key of the event.
+func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
+ var victim watcherBatch
+ for w, eb := range newWatcherBatch(&s.synced, evs) {
+ if eb.revs != 1 {
+ if s.store != nil && s.store.lg != nil {
+ s.store.lg.Panic(
+ "unexpected multiple revisions in watch notification",
+ zap.Int("number-of-revisions", eb.revs),
+ )
+ } else {
+ plog.Panicf("unexpected multiple revisions in notification")
+ }
+ }
+ if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
+ pendingEventsGauge.Add(float64(len(eb.evs)))
+ } else {
+ // move slow watcher to victims
+ w.minRev = rev + 1
+ if victim == nil {
+ victim = make(watcherBatch)
+ }
+ w.victim = true
+ victim[w] = eb
+ s.synced.delete(w)
+ slowWatcherGauge.Inc()
+ }
+ }
+ s.addVictim(victim)
+}
+
+func (s *watchableStore) addVictim(victim watcherBatch) {
+ if victim == nil {
+ return
+ }
+ s.victims = append(s.victims, victim)
+ select {
+ case s.victimc <- struct{}{}:
+ default:
+ }
+}
+
+func (s *watchableStore) rev() int64 { return s.store.Rev() }
+
+func (s *watchableStore) progress(w *watcher) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ if _, ok := s.synced.watchers[w]; ok {
+ w.send(WatchResponse{WatchID: w.id, Revision: s.rev()})
+ // If the ch is full, this watcher is receiving events.
+ // We do not need to send progress at all.
+ }
+}
+
+type watcher struct {
+ // the watcher key
+ key []byte
+ // end indicates the end of the range to watch.
+ // If end is set, the watcher is on a range.
+ end []byte
+
+ // victim is set when ch is blocked and undergoing victim processing
+ victim bool
+
+ // compacted is set when the watcher is removed because of compaction
+ compacted bool
+
+ // restore is true when the watcher is being restored from a leader snapshot,
+ // which means that this watcher has just been moved from the "synced" to the
+ // "unsynced" watcher group, possibly with a future revision from when it was
+ // first added to the synced group.
+ // An "unsynced" watcher's revision must always be <= the current revision,
+ // except when the watcher has just been moved from the "synced" watcher group.
+ restore bool
+
+ // minRev is the minimum revision update the watcher will accept
+ minRev int64
+ id WatchID
+
+ fcs []FilterFunc
+ // a chan to send out the watch response.
+ // The chan might be shared with other watchers.
+ ch chan<- WatchResponse
+}
+
+func (w *watcher) send(wr WatchResponse) bool {
+ progressEvent := len(wr.Events) == 0
+
+ if len(w.fcs) != 0 {
+ ne := make([]mvccpb.Event, 0, len(wr.Events))
+ for i := range wr.Events {
+ filtered := false
+ for _, filter := range w.fcs {
+ if filter(wr.Events[i]) {
+ filtered = true
+ break
+ }
+ }
+ if !filtered {
+ ne = append(ne, wr.Events[i])
+ }
+ }
+ wr.Events = ne
+ }
+
+ // if all events are filtered out, we should send nothing.
+ if !progressEvent && len(wr.Events) == 0 {
+ return true
+ }
+ select {
+ case w.ch <- wr:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/watchable_store_txn.go b/vendor/go.etcd.io/etcd/mvcc/watchable_store_txn.go
new file mode 100644
index 000000000000..70b12983d970
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/watchable_store_txn.go
@@ -0,0 +1,56 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "go.etcd.io/etcd/mvcc/mvccpb"
+ "go.etcd.io/etcd/pkg/traceutil"
+)
+
+func (tw *watchableStoreTxnWrite) End() {
+ changes := tw.Changes()
+ if len(changes) == 0 {
+ tw.TxnWrite.End()
+ return
+ }
+
+ rev := tw.Rev() + 1
+ evs := make([]mvccpb.Event, len(changes))
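+ // storeTxnWrite.delete records a bare KeyValue{Key: key}, so a zero
+ // CreateRevision identifies a tombstone here.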
+ for i, change := range changes {
+ evs[i].Kv = &changes[i]
+ if change.CreateRevision == 0 {
+ evs[i].Type = mvccpb.DELETE
+ evs[i].Kv.ModRevision = rev
+ } else {
+ evs[i].Type = mvccpb.PUT
+ }
+ }
+
+ // end write txn under watchable store lock so the updates are visible
+ // when asynchronous event posting checks the current store revision
+ tw.s.mu.Lock()
+ tw.s.notify(rev, evs)
+ tw.TxnWrite.End()
+ tw.s.mu.Unlock()
+}
+
+type watchableStoreTxnWrite struct {
+ TxnWrite
+ s *watchableStore
+}
+
+func (s *watchableStore) Write(trace *traceutil.Trace) TxnWrite {
+ return &watchableStoreTxnWrite{s.store.Write(trace), s}
+}
diff --git a/vendor/go.etcd.io/etcd/mvcc/watcher.go b/vendor/go.etcd.io/etcd/mvcc/watcher.go
new file mode 100644
index 000000000000..2846d62a5d41
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/watcher.go
@@ -0,0 +1,193 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "bytes"
+ "errors"
+ "sync"
+
+ "go.etcd.io/etcd/mvcc/mvccpb"
+)
+
+// AutoWatchID is the watcher ID passed in WatchStream.Watch when no
+// user-provided ID is available. If passed, an ID is assigned automatically.
+const AutoWatchID WatchID = 0
+
+var (
+ ErrWatcherNotExist = errors.New("mvcc: watcher does not exist")
+ ErrEmptyWatcherRange = errors.New("mvcc: watcher range is empty")
+ ErrWatcherDuplicateID = errors.New("mvcc: duplicate watch ID provided on the WatchStream")
+)
+
+type WatchID int64
+
+// FilterFunc returns true if the given event should be filtered out.
+type FilterFunc func(e mvccpb.Event) bool
+
+type WatchStream interface {
+ // Watch creates a watcher. The watcher watches the events happening or
+ // happened on the given key or range [key, end) from the given startRev.
+ //
+ // The whole event history can be watched unless compacted.
+ // If "startRev" <=0, watch observes events after currentRev.
+ //
+ // The returned "id" is the ID of this watcher. It appears as WatchID
+ // in events that are sent to the created watcher through stream channel.
+ // The watch ID is used when it's not equal to AutoWatchID. Otherwise,
+ // an auto-generated watch ID is returned.
+ Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error)
+
+ // Chan returns a chan. All watch responses will be sent to the returned chan.
+ Chan() <-chan WatchResponse
+
+ // RequestProgress requests the progress of the watcher with given ID. The response
+ // will only be sent if the watcher is currently synced.
+ // The responses will be sent through the WatchResponse Chan attached
+ // to this stream to ensure correct ordering.
+ // The responses contain no events. The revision in the response is the progress
+ // of the watcher since the watcher is currently synced.
+ RequestProgress(id WatchID)
+
+ // Cancel cancels a watcher by giving its ID. If watcher does not exist, an error will be
+ // returned.
+ Cancel(id WatchID) error
+
+ // Close closes Chan and releases all related resources.
+ Close()
+
+ // Rev returns the current revision of the KV the stream watches on.
+ Rev() int64
+}
+
+type WatchResponse struct {
+ // WatchID is the WatchID of the watcher this response sent to.
+ WatchID WatchID
+
+ // Events contains all the events to be sent.
+ Events []mvccpb.Event
+
+ // Revision is the revision of the KV when the watchResponse is created.
+ // For a normal response, the revision should be the same as the last
+ // modified revision inside Events. For a delayed response to an unsynced
+ // watcher, the revision is greater than the last modified revision
+ // inside Events.
+ Revision int64
+
+ // CompactRevision is set when the watcher is cancelled due to compaction.
+ CompactRevision int64
+}
+
+// watchStream contains a collection of watchers that share
+// one streaming chan to send out watched events and other control events.
+type watchStream struct {
+ watchable watchable
+ ch chan WatchResponse
+
+ mu sync.Mutex // guards fields below it
+ // nextID is the ID pre-allocated for next new watcher in this stream
+ nextID WatchID
+ closed bool
+ cancels map[WatchID]cancelFunc
+ watchers map[WatchID]*watcher
+}
+
+// Watch creates a new watcher in the stream and returns its WatchID.
+func (ws *watchStream) Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error) {
+ // prevent wrong range where key >= end lexicographically
+ // watch request with 'WithFromKey' has empty-byte range end
+ if len(end) != 0 && bytes.Compare(key, end) != -1 {
+ return -1, ErrEmptyWatcherRange
+ }
+
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+ if ws.closed {
+ return -1, ErrEmptyWatcherRange
+ }
+
+ if id == AutoWatchID {
+ for ws.watchers[ws.nextID] != nil {
+ ws.nextID++
+ }
+ id = ws.nextID
+ ws.nextID++
+ } else if _, ok := ws.watchers[id]; ok {
+ return -1, ErrWatcherDuplicateID
+ }
+
+ w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...)
+
+ ws.cancels[id] = c
+ ws.watchers[id] = w
+ return id, nil
+}
+
+func (ws *watchStream) Chan() <-chan WatchResponse {
+ return ws.ch
+}
+
+func (ws *watchStream) Cancel(id WatchID) error {
+ ws.mu.Lock()
+ cancel, ok := ws.cancels[id]
+ w := ws.watchers[id]
+ ok = ok && !ws.closed
+ ws.mu.Unlock()
+
+ if !ok {
+ return ErrWatcherNotExist
+ }
+ cancel()
+
+ ws.mu.Lock()
+ // The watch isn't removed until cancel so that if Close() is called,
+ // it will wait for the cancel. Otherwise, Close() could close the
+ // watch channel while the store is still posting events.
+ if ww := ws.watchers[id]; ww == w {
+ delete(ws.cancels, id)
+ delete(ws.watchers, id)
+ }
+ ws.mu.Unlock()
+
+ return nil
+}
+
+func (ws *watchStream) Close() {
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+
+ for _, cancel := range ws.cancels {
+ cancel()
+ }
+ ws.closed = true
+ close(ws.ch)
+ watchStreamGauge.Dec()
+}
+
+func (ws *watchStream) Rev() int64 {
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+ return ws.watchable.rev()
+}
+
+func (ws *watchStream) RequestProgress(id WatchID) {
+ ws.mu.Lock()
+ w, ok := ws.watchers[id]
+ ws.mu.Unlock()
+ if !ok {
+ return
+ }
+ ws.watchable.progress(w)
+}
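+
+// A typical stream lifecycle, as an illustrative sketch:
+//
+//   ws := s.NewWatchStream() // s implements WatchableKV
+//   id, _ := ws.Watch(AutoWatchID, []byte("foo"), nil, 0)
+//   for resp := range ws.Chan() {
+//       // process resp.Events ...
+//   }
+//   _ = ws.Cancel(id)
+//   ws.Close()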
diff --git a/vendor/go.etcd.io/etcd/mvcc/watcher_group.go b/vendor/go.etcd.io/etcd/mvcc/watcher_group.go
new file mode 100644
index 000000000000..151f0de71851
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/mvcc/watcher_group.go
@@ -0,0 +1,293 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "fmt"
+ "math"
+
+ "go.etcd.io/etcd/mvcc/mvccpb"
+ "go.etcd.io/etcd/pkg/adt"
+)
+
+var (
+ // watchBatchMaxRevs is the maximum distinct revisions that
+ // may be sent to an unsynced watcher at a time. Declared as
+ // var instead of const for testing purposes.
+ watchBatchMaxRevs = 1000
+)
+
+type eventBatch struct {
+ // evs is a batch of revision-ordered events
+ evs []mvccpb.Event
+ // revs is the number of unique revisions observed in this batch
+ revs int
+ // moreRev is first revision with more events following this batch
+ moreRev int64
+}
+
+func (eb *eventBatch) add(ev mvccpb.Event) {
+ if eb.revs > watchBatchMaxRevs {
+ // maxed out batch size
+ return
+ }
+
+ if len(eb.evs) == 0 {
+ // base case
+ eb.revs = 1
+ eb.evs = append(eb.evs, ev)
+ return
+ }
+
+ // revision accounting
+ ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision
+ evRev := ev.Kv.ModRevision
+ if evRev > ebRev {
+ eb.revs++
+ if eb.revs > watchBatchMaxRevs {
+ eb.moreRev = evRev
+ return
+ }
+ }
+
+ eb.evs = append(eb.evs, ev)
+}
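+
+// Illustrative trace with watchBatchMaxRevs = 2: adding events at
+// ModRevisions 4, 4, 5, 6 stores [4, 4, 5] in evs; the event at revision 6
+// pushes revs past the cap, so moreRev = 6 is recorded and the watcher is
+// later resumed from revision 6 (see the eb.moreRev handling in
+// watchable_store.go).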
+
+type watcherBatch map[*watcher]*eventBatch
+
+func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) {
+ eb := wb[w]
+ if eb == nil {
+ eb = &eventBatch{}
+ wb[w] = eb
+ }
+ eb.add(ev)
+}
+
+// newWatcherBatch maps watchers to their matched events. It enables quick
+// event lookup by watcher.
+func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch {
+ if len(wg.watchers) == 0 {
+ return nil
+ }
+
+ wb := make(watcherBatch)
+ for _, ev := range evs {
+ for w := range wg.watcherSetByKey(string(ev.Kv.Key)) {
+ if ev.Kv.ModRevision >= w.minRev {
+ // don't double notify
+ wb.add(w, ev)
+ }
+ }
+ }
+ return wb
+}
+
+type watcherSet map[*watcher]struct{}
+
+func (w watcherSet) add(wa *watcher) {
+ if _, ok := w[wa]; ok {
+ panic("add watcher twice!")
+ }
+ w[wa] = struct{}{}
+}
+
+func (w watcherSet) union(ws watcherSet) {
+ for wa := range ws {
+ w.add(wa)
+ }
+}
+
+func (w watcherSet) delete(wa *watcher) {
+ if _, ok := w[wa]; !ok {
+ panic("removing missing watcher!")
+ }
+ delete(w, wa)
+}
+
+type watcherSetByKey map[string]watcherSet
+
+func (w watcherSetByKey) add(wa *watcher) {
+ set := w[string(wa.key)]
+ if set == nil {
+ set = make(watcherSet)
+ w[string(wa.key)] = set
+ }
+ set.add(wa)
+}
+
+func (w watcherSetByKey) delete(wa *watcher) bool {
+ k := string(wa.key)
+ if v, ok := w[k]; ok {
+ if _, ok := v[wa]; ok {
+ delete(v, wa)
+ if len(v) == 0 {
+ // remove the set; nothing left
+ delete(w, k)
+ }
+ return true
+ }
+ }
+ return false
+}
+
+// watcherGroup is a collection of watchers organized by their ranges
+type watcherGroup struct {
+ // keyWatchers has the watchers that watch on a single key
+ keyWatchers watcherSetByKey
+ // ranges has the watchers that watch a range; it is sorted by interval
+ ranges adt.IntervalTree
+ // watchers is the set of all watchers
+ watchers watcherSet
+}
+
+func newWatcherGroup() watcherGroup {
+ return watcherGroup{
+ keyWatchers: make(watcherSetByKey),
+ ranges: adt.NewIntervalTree(),
+ watchers: make(watcherSet),
+ }
+}
+
+// add puts a watcher in the group.
+func (wg *watcherGroup) add(wa *watcher) {
+ wg.watchers.add(wa)
+ if wa.end == nil {
+ wg.keyWatchers.add(wa)
+ return
+ }
+
+ // interval already registered?
+ ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end))
+ if iv := wg.ranges.Find(ivl); iv != nil {
+ iv.Val.(watcherSet).add(wa)
+ return
+ }
+
+ // not registered, put in interval tree
+ ws := make(watcherSet)
+ ws.add(wa)
+ wg.ranges.Insert(ivl, ws)
+}
+
+// contains is whether the given key has a watcher in the group.
+func (wg *watcherGroup) contains(key string) bool {
+ _, ok := wg.keyWatchers[key]
+ return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key))
+}
+
+// size gives the number of unique watchers in the group.
+func (wg *watcherGroup) size() int { return len(wg.watchers) }
+
+// delete removes a watcher from the group.
+func (wg *watcherGroup) delete(wa *watcher) bool {
+ if _, ok := wg.watchers[wa]; !ok {
+ return false
+ }
+ wg.watchers.delete(wa)
+ if wa.end == nil {
+ wg.keyWatchers.delete(wa)
+ return true
+ }
+
+ ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end))
+ iv := wg.ranges.Find(ivl)
+ if iv == nil {
+ return false
+ }
+
+ ws := iv.Val.(watcherSet)
+ delete(ws, wa)
+ if len(ws) == 0 {
+ // remove interval missing watchers
+ if ok := wg.ranges.Delete(ivl); !ok {
+ panic("could not remove watcher from interval tree")
+ }
+ }
+
+ return true
+}
+
+// choose selects watchers from the watcher group to update
+func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) {
+ if len(wg.watchers) < maxWatchers {
+ return wg, wg.chooseAll(curRev, compactRev)
+ }
+ ret := newWatcherGroup()
+ for w := range wg.watchers {
+ if maxWatchers <= 0 {
+ break
+ }
+ maxWatchers--
+ ret.add(w)
+ }
+ return &ret, ret.chooseAll(curRev, compactRev)
+}
+
+func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 {
+ minRev := int64(math.MaxInt64)
+ for w := range wg.watchers {
+ if w.minRev > curRev {
+ // after network partition, possibly choosing future revision watcher from restore operation
+ // with watch key "proxy-namespace__lostleader" and revision "math.MaxInt64 - 2"
+ // do not panic when such watcher had been moved from "synced" watcher during restore operation
+ if !w.restore {
+ panic(fmt.Errorf("watcher minimum revision %d should not exceed current revision %d", w.minRev, curRev))
+ }
+
+ // mark 'restore' done, since it's chosen
+ w.restore = false
+ }
+ if w.minRev < compactRev {
+ select {
+ case w.ch <- WatchResponse{WatchID: w.id, CompactRevision: compactRev}:
+ w.compacted = true
+ wg.delete(w)
+ default:
+ // retry next time
+ }
+ continue
+ }
+ if minRev > w.minRev {
+ minRev = w.minRev
+ }
+ }
+ return minRev
+}
+
+// watcherSetByKey gets the set of watchers that receive events on the given key.
+func (wg *watcherGroup) watcherSetByKey(key string) watcherSet {
+ wkeys := wg.keyWatchers[key]
+ wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key))
+
+ // zero-copy cases
+ switch {
+ case len(wranges) == 0:
+ // no need to merge ranges or copy; reuse single-key set
+ return wkeys
+ case len(wranges) == 0 && len(wkeys) == 0:
+ return nil
+ case len(wranges) == 1 && len(wkeys) == 0:
+ return wranges[0].Val.(watcherSet)
+ }
+
+ // copy case
+ ret := make(watcherSet)
+ ret.union(wg.keyWatchers[key])
+ for _, item := range wranges {
+ ret.union(item.Val.(watcherSet))
+ }
+ return ret
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/adt/README.md b/vendor/go.etcd.io/etcd/pkg/adt/README.md
new file mode 100644
index 000000000000..a2089cd4b96c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/adt/README.md
@@ -0,0 +1,48 @@
+
+## Red-Black Tree
+
+*"Introduction to Algorithms" (Cormen et al, 3rd ed.), Chapter 13*
+
+1. Every node is either red or black.
+2. The root is black.
+3. Every leaf (NIL) is black.
+4. If a node is red, then both its children are black.
+5. For each node, all simple paths from the node to descendant leaves contain the
+same number of black nodes.
+
+For example,
+
+```go
+import (
+ "fmt"
+
+ "go.etcd.io/etcd/pkg/adt"
+)
+
+func main() {
+ ivt := adt.NewIntervalTree()
+ ivt.Insert(adt.NewInt64Interval(510, 511), 0)
+ ivt.Insert(adt.NewInt64Interval(82, 83), 0)
+ ivt.Insert(adt.NewInt64Interval(830, 831), 0)
+ ...
+```
+
+After inserting the values `510`, `82`, `830`, `11`, `383`, `647`, `899`, `261`, `410`, `514`, `815`, `888`, `972`, `238`, `292`, `953`.
+
+![red-black-tree-01-insertion.png](img/red-black-tree-01-insertion.png)
+
+Deleting the node `514` should not trigger any rebalancing:
+
+![red-black-tree-02-delete-514.png](img/red-black-tree-02-delete-514.png)
+
+Deleting the node `11` triggers multiple rotates for rebalancing:
+
+![red-black-tree-03-delete-11.png](img/red-black-tree-03-delete-11.png)
+![red-black-tree-04-delete-11.png](img/red-black-tree-04-delete-11.png)
+![red-black-tree-05-delete-11.png](img/red-black-tree-05-delete-11.png)
+![red-black-tree-06-delete-11.png](img/red-black-tree-06-delete-11.png)
+![red-black-tree-07-delete-11.png](img/red-black-tree-07-delete-11.png)
+![red-black-tree-08-delete-11.png](img/red-black-tree-08-delete-11.png)
+![red-black-tree-09-delete-11.png](img/red-black-tree-09-delete-11.png)
+
+Try it yourself at https://www.cs.usfca.edu/~galles/visualization/RedBlack.html.
diff --git a/vendor/go.etcd.io/etcd/pkg/adt/doc.go b/vendor/go.etcd.io/etcd/pkg/adt/doc.go
new file mode 100644
index 000000000000..1a9559145b31
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/adt/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package adt implements useful abstract data types.
+package adt
diff --git a/vendor/go.etcd.io/etcd/pkg/adt/interval_tree.go b/vendor/go.etcd.io/etcd/pkg/adt/interval_tree.go
new file mode 100644
index 000000000000..2e5b2ddb8827
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/adt/interval_tree.go
@@ -0,0 +1,951 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "strings"
+)
+
+// Comparable is an interface for trichotomic comparisons.
+type Comparable interface {
+ // Compare gives the result of a 3-way comparison
+ // a.Compare(b) = 1 => a > b
+ // a.Compare(b) = 0 => a == b
+ // a.Compare(b) = -1 => a < b
+ Compare(c Comparable) int
+}
+
+type rbcolor int
+
+const (
+ black rbcolor = iota
+ red
+)
+
+func (c rbcolor) String() string {
+ switch c {
+ case black:
+ return "black"
+ case red:
+ return "black"
+ default:
+ panic(fmt.Errorf("unknown color %d", c))
+ }
+}
+
+// Interval implements a Comparable interval [begin, end)
+// TODO: support different sorts of intervals: (a,b), [a,b], (a, b]
+type Interval struct {
+ Begin Comparable
+ End Comparable
+}
+
+// Compare on an interval gives == if the interval overlaps.
+func (ivl *Interval) Compare(c Comparable) int {
+ ivl2 := c.(*Interval)
+ ivbCmpBegin := ivl.Begin.Compare(ivl2.Begin)
+ ivbCmpEnd := ivl.Begin.Compare(ivl2.End)
+ iveCmpBegin := ivl.End.Compare(ivl2.Begin)
+
+ // ivl is left of ivl2
+ if ivbCmpBegin < 0 && iveCmpBegin <= 0 {
+ return -1
+ }
+
+ // ivl is right of ivl2
+ if ivbCmpEnd >= 0 {
+ return 1
+ }
+
+ return 0
+}
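+
+// For example, [1, 3) and [2, 4) overlap, so their Compare yields 0, while
+// [1, 2) is strictly left of [2, 4) (half-open ends may touch) and yields -1.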
+
+type intervalNode struct {
+ // iv is the interval-value pair entry.
+ iv IntervalValue
+ // max endpoint of all descendant nodes.
+ max Comparable
+ // left and right are sorted by low endpoint of key interval
+ left, right *intervalNode
+ // parent is the direct ancestor of the node
+ parent *intervalNode
+ c rbcolor
+}
+
+func (x *intervalNode) color(sentinel *intervalNode) rbcolor {
+ if x == sentinel {
+ return black
+ }
+ return x.c
+}
+
+func (x *intervalNode) height(sentinel *intervalNode) int {
+ if x == sentinel {
+ return 0
+ }
+ ld := x.left.height(sentinel)
+ rd := x.right.height(sentinel)
+ if ld < rd {
+ return rd + 1
+ }
+ return ld + 1
+}
+
+func (x *intervalNode) min(sentinel *intervalNode) *intervalNode {
+ for x.left != sentinel {
+ x = x.left
+ }
+ return x
+}
+
+// successor is the next in-order node in the tree
+func (x *intervalNode) successor(sentinel *intervalNode) *intervalNode {
+ if x.right != sentinel {
+ return x.right.min(sentinel)
+ }
+ y := x.parent
+ for y != sentinel && x == y.right {
+ x = y
+ y = y.parent
+ }
+ return y
+}
+
+// updateMax updates the maximum values for a node and its ancestors
+func (x *intervalNode) updateMax(sentinel *intervalNode) {
+ for x != sentinel {
+ oldmax := x.max
+ max := x.iv.Ivl.End
+ if x.left != sentinel && x.left.max.Compare(max) > 0 {
+ max = x.left.max
+ }
+ if x.right != sentinel && x.right.max.Compare(max) > 0 {
+ max = x.right.max
+ }
+ if oldmax.Compare(max) == 0 {
+ break
+ }
+ x.max = max
+ x = x.parent
+ }
+}
+
+type nodeVisitor func(n *intervalNode) bool
+
+// visit will call a node visitor on each node that overlaps the given interval
+func (x *intervalNode) visit(iv *Interval, sentinel *intervalNode, nv nodeVisitor) bool {
+ if x == sentinel {
+ return true
+ }
+ v := iv.Compare(&x.iv.Ivl)
+ switch {
+ case v < 0:
+ if !x.left.visit(iv, sentinel, nv) {
+ return false
+ }
+ case v > 0:
+ maxiv := Interval{x.iv.Ivl.Begin, x.max}
+ if maxiv.Compare(iv) == 0 {
+ if !x.left.visit(iv, sentinel, nv) || !x.right.visit(iv, sentinel, nv) {
+ return false
+ }
+ }
+ default:
+ if !x.left.visit(iv, sentinel, nv) || !nv(x) || !x.right.visit(iv, sentinel, nv) {
+ return false
+ }
+ }
+ return true
+}
+
+// IntervalValue represents a range tree node that contains a range and a value.
+type IntervalValue struct {
+ Ivl Interval
+ Val interface{}
+}
+
+// IntervalTree represents a (mostly) textbook implementation of the
+// "Introduction to Algorithms" (Cormen et al, 3rd ed.) chapter 13 red-black tree
+// and chapter 14.3 interval tree with search supporting "stabbing queries".
+type IntervalTree interface {
+ // Insert adds a node with the given interval into the tree.
+ Insert(ivl Interval, val interface{})
+ // Delete removes the node with the given interval from the tree, returning
+ // true if a node is in fact removed.
+ Delete(ivl Interval) bool
+ // Len gives the number of elements in the tree.
+ Len() int
+ // Height is the number of levels in the tree; one node has height 1.
+ Height() int
+ // MaxHeight is the expected maximum tree height given the number of nodes.
+ MaxHeight() int
+ // Visit calls a visitor function on every tree node intersecting the given interval.
+ // It will visit each interval [x, y) in ascending order sorted on x.
+ Visit(ivl Interval, ivv IntervalVisitor)
+ // Find gets the IntervalValue for the node matching the given interval
+ Find(ivl Interval) *IntervalValue
+ // Intersects returns true if there is some tree node intersecting the given interval.
+ Intersects(iv Interval) bool
+ // Contains returns true if the interval tree's keys cover the entire given interval.
+ Contains(ivl Interval) bool
+ // Stab returns a slice with all elements in the tree intersecting the interval.
+ Stab(iv Interval) []*IntervalValue
+ // Union merges a given interval tree into the receiver.
+ Union(inIvt IntervalTree, ivl Interval)
+}
+
+// NewIntervalTree returns a new interval tree.
+func NewIntervalTree() IntervalTree {
+ sentinel := &intervalNode{
+ iv: IntervalValue{},
+ max: nil,
+ left: nil,
+ right: nil,
+ parent: nil,
+ c: black,
+ }
+ return &intervalTree{
+ root: sentinel,
+ count: 0,
+ sentinel: sentinel,
+ }
+}
+
+type intervalTree struct {
+ root *intervalNode
+ count int
+
+ // red-black NIL node
+ // use 'sentinel' as a dummy object to simplify boundary conditions
+ // use the sentinel to treat a nil child of a node x as an ordinary node whose parent is x
+ // use one shared sentinel to represent all nil leaves and the root's parent
+ sentinel *intervalNode
+}
+
+// TODO: make this consistent with textbook implementation
+//
+// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p324
+//
+// 0. RB-DELETE(T, z)
+// 1.
+// 2. y = z
+// 3. y-original-color = y.color
+// 4.
+// 5. if z.left == T.nil
+// 6. x = z.right
+// 7. RB-TRANSPLANT(T, z, z.right)
+// 8. else if z.right == T.nil
+// 9. x = z.left
+// 10. RB-TRANSPLANT(T, z, z.left)
+// 11. else
+// 12. y = TREE-MINIMUM(z.right)
+// 13. y-original-color = y.color
+// 14. x = y.right
+// 15. if y.p == z
+// 16. x.p = y
+// 17. else
+// 18. RB-TRANSPLANT(T, y, y.right)
+// 19. y.right = z.right
+// 20. y.right.p = y
+// 21. RB-TRANSPLANT(T, z, y)
+// 22. y.left = z.left
+// 23. y.left.p = y
+// 24. y.color = z.color
+// 25.
+// 26. if y-original-color == BLACK
+// 27. RB-DELETE-FIXUP(T, x)
+
+// Delete removes the node with the given interval from the tree, returning
+// true if a node is in fact removed.
+func (ivt *intervalTree) Delete(ivl Interval) bool {
+ z := ivt.find(ivl)
+ if z == ivt.sentinel {
+ return false
+ }
+
+ y := z
+ if z.left != ivt.sentinel && z.right != ivt.sentinel {
+ y = z.successor(ivt.sentinel)
+ }
+
+ x := ivt.sentinel
+ if y.left != ivt.sentinel {
+ x = y.left
+ } else if y.right != ivt.sentinel {
+ x = y.right
+ }
+
+ x.parent = y.parent
+
+ if y.parent == ivt.sentinel {
+ ivt.root = x
+ } else {
+ if y == y.parent.left {
+ y.parent.left = x
+ } else {
+ y.parent.right = x
+ }
+ y.parent.updateMax(ivt.sentinel)
+ }
+ if y != z {
+ z.iv = y.iv
+ z.updateMax(ivt.sentinel)
+ }
+
+ if y.color(ivt.sentinel) == black {
+ ivt.deleteFixup(x)
+ }
+
+ ivt.count--
+ return true
+}
+
+// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p326
+//
+// 0. RB-DELETE-FIXUP(T, z)
+// 1.
+// 2. while x ≠ T.root and x.color == BLACK
+// 3. if x == x.p.left
+// 4. w = x.p.right
+// 5. if w.color == RED
+// 6. w.color = BLACK
+// 7. x.p.color = RED
+// 8. LEFT-ROTATE(T, x.p)
+// 9. if w.left.color == BLACK and w.right.color == BLACK
+// 10. w.color = RED
+// 11. x = x.p
+// 12. else if w.right.color == BLACK
+// 13. w.left.color = BLACK
+// 14. w.color = RED
+// 15. RIGHT-ROTATE(T, w)
+// 16. w = x.p.right
+// 17. w.color = x.p.color
+// 18. x.p.color = BLACK
+// 19. LEFT-ROTATE(T, w.p)
+// 20. x = T.root
+// 21. else
+// 22. w = x.p.left
+// 23. if w.color == RED
+// 24. w.color = BLACK
+// 25. x.p.color = RED
+// 26. RIGHT-ROTATE(T, x.p)
+// 27. if w.right.color == BLACK and w.left.color == BLACK
+// 28. w.color = RED
+// 29. x = x.p
+// 30. else if w.left.color == BLACK
+// 31. w.right.color = BLACK
+// 32. w.color = RED
+// 33. LEFT-ROTATE(T, w)
+// 34. w = x.p.left
+// 35. w.color = x.p.color
+// 36. x.p.color = BLACK
+// 37. RIGHT-ROTATE(T, w.p)
+// 38. x = T.root
+// 39.
+// 40. x.color = BLACK
+//
+func (ivt *intervalTree) deleteFixup(x *intervalNode) {
+ for x != ivt.root && x.color(ivt.sentinel) == black {
+ if x == x.parent.left { // line 3-20
+ w := x.parent.right
+ if w.color(ivt.sentinel) == red {
+ w.c = black
+ x.parent.c = red
+ ivt.rotateLeft(x.parent)
+ w = x.parent.right
+ }
+ if w == nil {
+ break
+ }
+ if w.left.color(ivt.sentinel) == black && w.right.color(ivt.sentinel) == black {
+ w.c = red
+ x = x.parent
+ } else {
+ if w.right.color(ivt.sentinel) == black {
+ w.left.c = black
+ w.c = red
+ ivt.rotateRight(w)
+ w = x.parent.right
+ }
+ w.c = x.parent.color(ivt.sentinel)
+ x.parent.c = black
+ w.right.c = black
+ ivt.rotateLeft(x.parent)
+ x = ivt.root
+ }
+ } else { // line 22-38
+ // same as above but with left and right exchanged
+ w := x.parent.left
+ if w.color(ivt.sentinel) == red {
+ w.c = black
+ x.parent.c = red
+ ivt.rotateRight(x.parent)
+ w = x.parent.left
+ }
+ if w == nil {
+ break
+ }
+ if w.left.color(ivt.sentinel) == black && w.right.color(ivt.sentinel) == black {
+ w.c = red
+ x = x.parent
+ } else {
+ if w.left.color(ivt.sentinel) == black {
+ w.right.c = black
+ w.c = red
+ ivt.rotateLeft(w)
+ w = x.parent.left
+ }
+ w.c = x.parent.color(ivt.sentinel)
+ x.parent.c = black
+ w.left.c = black
+ ivt.rotateRight(x.parent)
+ x = ivt.root
+ }
+ }
+ }
+
+ if x != nil {
+ x.c = black
+ }
+}
+
+func (ivt *intervalTree) createIntervalNode(ivl Interval, val interface{}) *intervalNode {
+ return &intervalNode{
+ iv: IntervalValue{ivl, val},
+ max: ivl.End,
+ c: red,
+ left: ivt.sentinel,
+ right: ivt.sentinel,
+ parent: ivt.sentinel,
+ }
+}
+
+// TODO: make this consistent with textbook implementation
+//
+// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p315
+//
+// 0. RB-INSERT(T, z)
+// 1.
+// 2. y = T.nil
+// 3. x = T.root
+// 4.
+// 5. while x ≠ T.nil
+// 6. y = x
+// 7. if z.key < x.key
+// 8. x = x.left
+// 9. else
+// 10. x = x.right
+// 11.
+// 12. z.p = y
+// 13.
+// 14. if y == T.nil
+// 15. T.root = z
+// 16. else if z.key < y.key
+// 17. y.left = z
+// 18. else
+// 19. y.right = z
+// 20.
+// 21. z.left = T.nil
+// 22. z.right = T.nil
+// 23. z.color = RED
+// 24.
+// 25. RB-INSERT-FIXUP(T, z)
+
+// Insert adds a node with the given interval into the tree.
+func (ivt *intervalTree) Insert(ivl Interval, val interface{}) {
+ y := ivt.sentinel
+ z := ivt.createIntervalNode(ivl, val)
+ x := ivt.root
+ for x != ivt.sentinel {
+ y = x
+ if z.iv.Ivl.Begin.Compare(x.iv.Ivl.Begin) < 0 {
+ x = x.left
+ } else {
+ x = x.right
+ }
+ }
+
+ z.parent = y
+ if y == ivt.sentinel {
+ ivt.root = z
+ } else {
+ if z.iv.Ivl.Begin.Compare(y.iv.Ivl.Begin) < 0 {
+ y.left = z
+ } else {
+ y.right = z
+ }
+ y.updateMax(ivt.sentinel)
+ }
+ z.c = red
+
+ ivt.insertFixup(z)
+ ivt.count++
+}
+
+// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p316
+//
+// 0. RB-INSERT-FIXUP(T, z)
+// 1.
+// 2. while z.p.color == RED
+// 3. if z.p == z.p.p.left
+// 4. y = z.p.p.right
+// 5. if y.color == RED
+// 6. z.p.color = BLACK
+// 7. y.color = BLACK
+// 8. z.p.p.color = RED
+// 9. z = z.p.p
+// 10. else if z == z.p.right
+// 11. z = z.p
+// 12. LEFT-ROTATE(T, z)
+// 13. z.p.color = BLACK
+// 14. z.p.p.color = RED
+// 15. RIGHT-ROTATE(T, z.p.p)
+// 16. else
+// 17. y = z.p.p.left
+// 18. if y.color == RED
+// 19. z.p.color = BLACK
+// 20. y.color = BLACK
+// 21. z.p.p.color = RED
+// 22. z = z.p.p
+// 23. else if z == z.p.left
+// 24. z = z.p
+// 25. RIGHT-ROTATE(T, z)
+// 26. z.p.color = BLACK
+// 27. z.p.p.color = RED
+// 28. LEFT-ROTATE(T, z.p.p)
+// 29.
+// 30. T.root.color = BLACK
+//
+func (ivt *intervalTree) insertFixup(z *intervalNode) {
+ for z.parent.color(ivt.sentinel) == red {
+ if z.parent == z.parent.parent.left { // line 3-15
+
+ y := z.parent.parent.right
+ if y.color(ivt.sentinel) == red {
+ y.c = black
+ z.parent.c = black
+ z.parent.parent.c = red
+ z = z.parent.parent
+ } else {
+ if z == z.parent.right {
+ z = z.parent
+ ivt.rotateLeft(z)
+ }
+ z.parent.c = black
+ z.parent.parent.c = red
+ ivt.rotateRight(z.parent.parent)
+ }
+ } else { // line 16-28
+ // same as then with left/right exchanged
+ y := z.parent.parent.left
+ if y.color(ivt.sentinel) == red {
+ y.c = black
+ z.parent.c = black
+ z.parent.parent.c = red
+ z = z.parent.parent
+ } else {
+ if z == z.parent.left {
+ z = z.parent
+ ivt.rotateRight(z)
+ }
+ z.parent.c = black
+ z.parent.parent.c = red
+ ivt.rotateLeft(z.parent.parent)
+ }
+ }
+ }
+
+ // line 30
+ ivt.root.c = black
+}
+
+// rotateLeft moves x so it is left of its right child
+//
+// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.2, p313
+//
+// 0. LEFT-ROTATE(T, x)
+// 1.
+// 2. y = x.right
+// 3. x.right = y.left
+// 4.
+// 5. if y.left ≠ T.nil
+// 6. y.left.p = x
+// 7.
+// 8. y.p = x.p
+// 9.
+// 10. if x.p == T.nil
+// 11. T.root = y
+// 12. else if x == x.p.left
+// 13. x.p.left = y
+// 14. else
+// 15. x.p.right = y
+// 16.
+// 17. y.left = x
+// 18. x.p = y
+//
+func (ivt *intervalTree) rotateLeft(x *intervalNode) {
+ // rotateLeft: x must have a right child
+ if x.right == ivt.sentinel {
+ return
+ }
+
+ // line 2-3
+ y := x.right
+ x.right = y.left
+
+ // line 5-6
+ if y.left != ivt.sentinel {
+ y.left.parent = x
+ }
+ x.updateMax(ivt.sentinel)
+
+ // line 10-15, 18
+ ivt.replaceParent(x, y)
+
+ // line 17
+ y.left = x
+ y.updateMax(ivt.sentinel)
+}
+
+// rotateRight moves x so it is right of its left child
+//
+// 0. RIGHT-ROTATE(T, x)
+// 1.
+// 2. y = x.left
+// 3. x.left = y.right
+// 4.
+// 5. if y.right ≠ T.nil
+// 6. y.right.p = x
+// 7.
+// 8. y.p = x.p
+// 9.
+// 10. if x.p == T.nil
+// 11. T.root = y
+// 12. else if x == x.p.right
+// 13. x.p.right = y
+// 14. else
+// 15. x.p.left = y
+// 16.
+// 17. y.right = x
+// 18. x.p = y
+//
+func (ivt *intervalTree) rotateRight(x *intervalNode) {
+ // rotateRight: x must have a left child
+ if x.left == ivt.sentinel {
+ return
+ }
+
+ // line 2-3
+ y := x.left
+ x.left = y.right
+
+ // line 5-6
+ if y.right != ivt.sentinel {
+ y.right.parent = x
+ }
+ x.updateMax(ivt.sentinel)
+
+ // line 10-15, 18
+ ivt.replaceParent(x, y)
+
+ // line 17
+ y.right = x
+ y.updateMax(ivt.sentinel)
+}
+
+// replaceParent replaces x's parent with y
+func (ivt *intervalTree) replaceParent(x *intervalNode, y *intervalNode) {
+ y.parent = x.parent
+ if x.parent == ivt.sentinel {
+ ivt.root = y
+ } else {
+ if x == x.parent.left {
+ x.parent.left = y
+ } else {
+ x.parent.right = y
+ }
+ x.parent.updateMax(ivt.sentinel)
+ }
+ x.parent = y
+}
+
+// Len gives the number of elements in the tree
+func (ivt *intervalTree) Len() int { return ivt.count }
+
+// Height is the number of levels in the tree; one node has height 1.
+func (ivt *intervalTree) Height() int { return ivt.root.height(ivt.sentinel) }
+
+// MaxHeight is the expected maximum tree height given the number of nodes
+func (ivt *intervalTree) MaxHeight() int {
+ return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5)
+}
+
+// IntervalVisitor is used on tree searches; return false to stop searching.
+type IntervalVisitor func(n *IntervalValue) bool
+
+// Visit calls a visitor function on every tree node intersecting the given interval.
+// It will visit each interval [x, y) in ascending order sorted on x.
+func (ivt *intervalTree) Visit(ivl Interval, ivv IntervalVisitor) {
+ ivt.root.visit(&ivl, ivt.sentinel, func(n *intervalNode) bool { return ivv(&n.iv) })
+}
+
+// find the exact node for a given interval
+func (ivt *intervalTree) find(ivl Interval) *intervalNode {
+ ret := ivt.sentinel
+ f := func(n *intervalNode) bool {
+ if n.iv.Ivl != ivl {
+ return true
+ }
+ ret = n
+ return false
+ }
+ ivt.root.visit(&ivl, ivt.sentinel, f)
+ return ret
+}
+
+// Find gets the IntervalValue for the node matching the given interval
+func (ivt *intervalTree) Find(ivl Interval) (ret *IntervalValue) {
+ n := ivt.find(ivl)
+ if n == ivt.sentinel {
+ return nil
+ }
+ return &n.iv
+}
+
+// Intersects returns true if there is some tree node intersecting the given interval.
+func (ivt *intervalTree) Intersects(iv Interval) bool {
+ x := ivt.root
+ for x != ivt.sentinel && iv.Compare(&x.iv.Ivl) != 0 {
+ if x.left != ivt.sentinel && x.left.max.Compare(iv.Begin) > 0 {
+ x = x.left
+ } else {
+ x = x.right
+ }
+ }
+ return x != ivt.sentinel
+}
+
+// Contains returns true if the interval tree's keys cover the entire given interval.
+func (ivt *intervalTree) Contains(ivl Interval) bool {
+ var maxEnd, minBegin Comparable
+
+ isContiguous := true
+ ivt.Visit(ivl, func(n *IntervalValue) bool {
+ if minBegin == nil {
+ minBegin = n.Ivl.Begin
+ maxEnd = n.Ivl.End
+ return true
+ }
+ if maxEnd.Compare(n.Ivl.Begin) < 0 {
+ isContiguous = false
+ return false
+ }
+ if n.Ivl.End.Compare(maxEnd) > 0 {
+ maxEnd = n.Ivl.End
+ }
+ return true
+ })
+
+ return isContiguous && minBegin != nil && maxEnd.Compare(ivl.End) >= 0 && minBegin.Compare(ivl.Begin) <= 0
+}
+
+// Stab returns a slice with all elements in the tree intersecting the interval.
+func (ivt *intervalTree) Stab(iv Interval) (ivs []*IntervalValue) {
+ if ivt.count == 0 {
+ return nil
+ }
+ f := func(n *IntervalValue) bool { ivs = append(ivs, n); return true }
+ ivt.Visit(iv, f)
+ return ivs
+}
+
+// Union merges a given interval tree into the receiver.
+func (ivt *intervalTree) Union(inIvt IntervalTree, ivl Interval) {
+ f := func(n *IntervalValue) bool {
+ ivt.Insert(n.Ivl, n.Val)
+ return true
+ }
+ inIvt.Visit(ivl, f)
+}
+
+type visitedInterval struct {
+ root Interval
+ left Interval
+ right Interval
+ color rbcolor
+ depth int
+}
+
+func (vi visitedInterval) String() string {
+ bd := new(strings.Builder)
+ bd.WriteString(fmt.Sprintf("root [%v,%v,%v], left [%v,%v], right [%v,%v], depth %d",
+ vi.root.Begin, vi.root.End, vi.color,
+ vi.left.Begin, vi.left.End,
+ vi.right.Begin, vi.right.End,
+ vi.depth,
+ ))
+ return bd.String()
+}
+
+// visitLevel traverses the tree in level order.
+// It is only used for testing.
+func (ivt *intervalTree) visitLevel() []visitedInterval {
+ if ivt.root == ivt.sentinel {
+ return nil
+ }
+
+ rs := make([]visitedInterval, 0, ivt.Len())
+
+ type pair struct {
+ node *intervalNode
+ depth int
+ }
+ queue := []pair{{ivt.root, 0}}
+ for len(queue) > 0 {
+ f := queue[0]
+ queue = queue[1:]
+
+ vi := visitedInterval{
+ root: f.node.iv.Ivl,
+ color: f.node.color(ivt.sentinel),
+ depth: f.depth,
+ }
+ if f.node.left != ivt.sentinel {
+ vi.left = f.node.left.iv.Ivl
+ queue = append(queue, pair{f.node.left, f.depth + 1})
+ }
+ if f.node.right != ivt.sentinel {
+ vi.right = f.node.right.iv.Ivl
+ queue = append(queue, pair{f.node.right, f.depth + 1})
+ }
+
+ rs = append(rs, vi)
+ }
+
+ return rs
+}
+
+type StringComparable string
+
+func (s StringComparable) Compare(c Comparable) int {
+ sc := c.(StringComparable)
+ if s < sc {
+ return -1
+ }
+ if s > sc {
+ return 1
+ }
+ return 0
+}
+
+func NewStringInterval(begin, end string) Interval {
+ return Interval{StringComparable(begin), StringComparable(end)}
+}
+
+func NewStringPoint(s string) Interval {
+ return Interval{StringComparable(s), StringComparable(s + "\x00")}
+}
+
+// StringAffineComparable treats "" as > all other strings
+type StringAffineComparable string
+
+func (s StringAffineComparable) Compare(c Comparable) int {
+ sc := c.(StringAffineComparable)
+
+ if len(s) == 0 {
+ if len(sc) == 0 {
+ return 0
+ }
+ return 1
+ }
+ if len(sc) == 0 {
+ return -1
+ }
+
+ if s < sc {
+ return -1
+ }
+ if s > sc {
+ return 1
+ }
+ return 0
+}
+
+func NewStringAffineInterval(begin, end string) Interval {
+ return Interval{StringAffineComparable(begin), StringAffineComparable(end)}
+}
+
+func NewStringAffinePoint(s string) Interval {
+ return NewStringAffineInterval(s, s+"\x00")
+}
+
+func NewInt64Interval(a int64, b int64) Interval {
+ return Interval{Int64Comparable(a), Int64Comparable(b)}
+}
+
+func newInt64EmptyInterval() Interval {
+ return Interval{Begin: nil, End: nil}
+}
+
+func NewInt64Point(a int64) Interval {
+ return Interval{Int64Comparable(a), Int64Comparable(a + 1)}
+}
+
+type Int64Comparable int64
+
+func (v Int64Comparable) Compare(c Comparable) int {
+ vc := c.(Int64Comparable)
+ cmp := v - vc
+ if cmp < 0 {
+ return -1
+ }
+ if cmp > 0 {
+ return 1
+ }
+ return 0
+}
+
+// BytesAffineComparable treats empty byte arrays as > all other byte arrays
+type BytesAffineComparable []byte
+
+func (b BytesAffineComparable) Compare(c Comparable) int {
+ bc := c.(BytesAffineComparable)
+
+ if len(b) == 0 {
+ if len(bc) == 0 {
+ return 0
+ }
+ return 1
+ }
+ if len(bc) == 0 {
+ return -1
+ }
+
+ return bytes.Compare(b, bc)
+}
+
+func NewBytesAffineInterval(begin, end []byte) Interval {
+ return Interval{BytesAffineComparable(begin), BytesAffineComparable(end)}
+}
+
+func NewBytesAffinePoint(b []byte) Interval {
+ be := make([]byte, len(b)+1)
+ copy(be, b)
+ be[len(b)] = 0
+ return NewBytesAffineInterval(b, be)
+}
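
A minimal usage sketch for the interval tree above, assuming the package is imported as adt from its vendor path go.etcd.io/etcd/pkg/adt: insert two half-open int64 intervals, then stab at a point both of them cover.

package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/adt" // import path assumed from the vendor directory
)

func main() {
	ivt := adt.NewIntervalTree()
	ivt.Insert(adt.NewInt64Interval(1, 5), "a")  // covers [1, 5)
	ivt.Insert(adt.NewInt64Interval(4, 10), "b") // covers [4, 10)

	// NewInt64Point(4) is the interval [4, 5); both stored intervals
	// intersect it, so Stab returns them in ascending Begin order.
	for _, iv := range ivt.Stab(adt.NewInt64Point(4)) {
		fmt.Printf("[%v, %v) -> %v\n", iv.Ivl.Begin, iv.Ivl.End, iv.Val)
	}
}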
diff --git a/vendor/go.etcd.io/etcd/pkg/contention/contention.go b/vendor/go.etcd.io/etcd/pkg/contention/contention.go
new file mode 100644
index 000000000000..26ce9a2f3473
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/contention/contention.go
@@ -0,0 +1,69 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package contention
+
+import (
+ "sync"
+ "time"
+)
+
+// TimeoutDetector detects routine starvations by
+// observing the actual time duration to finish an action
+// or between two events that should happen in a fixed
+// interval. If the observed duration is longer than
+// the expectation, the detector will report the result.
+type TimeoutDetector struct {
+ mu sync.Mutex // protects all
+ maxDuration time.Duration
+ // map from event to time
+ // time is the last seen time of the event.
+ records map[uint64]time.Time
+}
+
+// NewTimeoutDetector creates the TimeoutDetector.
+func NewTimeoutDetector(maxDuration time.Duration) *TimeoutDetector {
+ return &TimeoutDetector{
+ maxDuration: maxDuration,
+ records: make(map[uint64]time.Time),
+ }
+}
+
+// Reset resets the TimeoutDetector.
+func (td *TimeoutDetector) Reset() {
+ td.mu.Lock()
+ defer td.mu.Unlock()
+
+ td.records = make(map[uint64]time.Time)
+}
+
+// Observe observes an event for the given id. It returns false and the
+// exceeded duration if the interval since the last observation is longer
+// than the expectation.
+func (td *TimeoutDetector) Observe(which uint64) (bool, time.Duration) {
+ td.mu.Lock()
+ defer td.mu.Unlock()
+
+ ok := true
+ now := time.Now()
+ exceed := time.Duration(0)
+
+ if pt, found := td.records[which]; found {
+ exceed = now.Sub(pt) - td.maxDuration
+ if exceed > 0 {
+ ok = false
+ }
+ }
+ td.records[which] = now
+ return ok, exceed
+}
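
A short sketch of the intended call pattern, with an illustrative 100ms expected interval and peer id: Observe both records the event time and reports whether the gap since the previous observation overran the expectation.

td := contention.NewTimeoutDetector(100 * time.Millisecond)
for i := 0; i < 10; i++ {
	// ... perform the periodic action for peer 1 here ...
	if ok, exceeded := td.Observe(1); !ok {
		log.Printf("peer 1 interval ran %v over the expectation", exceeded)
	}
	time.Sleep(100 * time.Millisecond)
}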
diff --git a/vendor/go.etcd.io/etcd/pkg/contention/doc.go b/vendor/go.etcd.io/etcd/pkg/contention/doc.go
new file mode 100644
index 000000000000..daf452219e07
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/contention/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package contention provides facilities for detecting system contention.
+package contention
diff --git a/vendor/go.etcd.io/etcd/pkg/cpuutil/doc.go b/vendor/go.etcd.io/etcd/pkg/cpuutil/doc.go
new file mode 100644
index 000000000000..0323b2d34c64
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/cpuutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cpuutil provides facilities for detecting cpu-specific features.
+package cpuutil
diff --git a/vendor/go.etcd.io/etcd/pkg/cpuutil/endian.go b/vendor/go.etcd.io/etcd/pkg/cpuutil/endian.go
new file mode 100644
index 000000000000..06c06cd4a5fa
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/cpuutil/endian.go
@@ -0,0 +1,36 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cpuutil
+
+import (
+ "encoding/binary"
+ "unsafe"
+)
+
+const intWidth int = int(unsafe.Sizeof(0))
+
+var byteOrder binary.ByteOrder
+
+// ByteOrder returns the byte order for the CPU's native endianness.
+func ByteOrder() binary.ByteOrder { return byteOrder }
+
+func init() {
+ i := int(0x1)
+ if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 {
+ byteOrder = binary.BigEndian
+ } else {
+ byteOrder = binary.LittleEndian
+ }
+}
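
For illustration, writing an integer in the machine's native byte order using the detected ByteOrder:

buf := make([]byte, 4)
cpuutil.ByteOrder().PutUint32(buf, 0x01020304)
// On a little-endian CPU buf is [0x04 0x03 0x02 0x01];
// on a big-endian CPU it is [0x01 0x02 0x03 0x04].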
diff --git a/vendor/go.etcd.io/etcd/pkg/crc/crc.go b/vendor/go.etcd.io/etcd/pkg/crc/crc.go
new file mode 100644
index 000000000000..4b998a48455f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/crc/crc.go
@@ -0,0 +1,43 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crc provides utility functions for cyclic redundancy check
+// algorithms.
+package crc
+
+import (
+ "hash"
+ "hash/crc32"
+)
+
+// The size of a CRC-32 checksum in bytes.
+const Size = 4
+
+type digest struct {
+ crc uint32
+ tab *crc32.Table
+}
+
+// New creates a new hash.Hash32 computing the CRC-32 checksum
+// using the polynomial represented by the Table.
+// Modified by xiangli to take a prevcrc.
+func New(prev uint32, tab *crc32.Table) hash.Hash32 { return &digest{prev, tab} }
+
+func (d *digest) Size() int { return Size }
+
+func (d *digest) BlockSize() int { return 1 }
+
+func (d *digest) Reset() { d.crc = 0 }
+
+func (d *digest) Write(p []byte) (n int, err error) {
+ d.crc = crc32.Update(d.crc, d.tab, p)
+ return len(p), nil
+}
+
+func (d *digest) Sum32() uint32 { return d.crc }
+
+func (d *digest) Sum(in []byte) []byte {
+ s := d.Sum32()
+ return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
+}
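
Because New seeds the digest with a previous checksum, callers can chain CRCs record by record instead of rehashing from the start; a sketch:

tab := crc32.MakeTable(crc32.Castagnoli)

h := crc.New(0, tab)
h.Write([]byte("hello "))
prev := h.Sum32()

// Seed the next digest with the previous record's checksum; the result
// matches crc32.Checksum([]byte("hello world"), tab).
h = crc.New(prev, tab)
h.Write([]byte("world"))
fmt.Printf("%08x\n", h.Sum32())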
diff --git a/vendor/go.etcd.io/etcd/pkg/debugutil/doc.go b/vendor/go.etcd.io/etcd/pkg/debugutil/doc.go
new file mode 100644
index 000000000000..74499eb2737e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/debugutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package debugutil includes utility functions for debugging.
+package debugutil
diff --git a/vendor/go.etcd.io/etcd/pkg/debugutil/pprof.go b/vendor/go.etcd.io/etcd/pkg/debugutil/pprof.go
new file mode 100644
index 000000000000..8d5544a3dcac
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/debugutil/pprof.go
@@ -0,0 +1,47 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debugutil
+
+import (
+ "net/http"
+ "net/http/pprof"
+ "runtime"
+)
+
+const HTTPPrefixPProf = "/debug/pprof"
+
+// PProfHandlers returns a map of pprof handlers keyed by the HTTP path.
+func PProfHandlers() map[string]http.Handler {
+ // set only when there's no existing setting
+ if runtime.SetMutexProfileFraction(-1) == 0 {
+ // 1 out of 5 mutex events are reported, on average
+ runtime.SetMutexProfileFraction(5)
+ }
+
+ m := make(map[string]http.Handler)
+
+ m[HTTPPrefixPProf+"/"] = http.HandlerFunc(pprof.Index)
+ m[HTTPPrefixPProf+"/profile"] = http.HandlerFunc(pprof.Profile)
+ m[HTTPPrefixPProf+"/symbol"] = http.HandlerFunc(pprof.Symbol)
+ m[HTTPPrefixPProf+"/cmdline"] = http.HandlerFunc(pprof.Cmdline)
+ m[HTTPPrefixPProf+"/trace"] = http.HandlerFunc(pprof.Trace)
+ m[HTTPPrefixPProf+"/heap"] = pprof.Handler("heap")
+ m[HTTPPrefixPProf+"/goroutine"] = pprof.Handler("goroutine")
+ m[HTTPPrefixPProf+"/threadcreate"] = pprof.Handler("threadcreate")
+ m[HTTPPrefixPProf+"/block"] = pprof.Handler("block")
+ m[HTTPPrefixPProf+"/mutex"] = pprof.Handler("mutex")
+
+ return m
+}
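
The returned map plugs directly into an http.ServeMux; a sketch with an illustrative listen address:

mux := http.NewServeMux()
for path, handler := range debugutil.PProfHandlers() {
	mux.Handle(path, handler)
}
log.Fatal(http.ListenAndServe("127.0.0.1:6060", mux))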
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go
new file mode 100644
index 000000000000..58a77dfc1a99
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_unix.go
@@ -0,0 +1,22 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package fileutil
+
+import "os"
+
+// OpenDir opens a directory for syncing.
+func OpenDir(path string) (*os.File, error) { return os.Open(path) }
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go
new file mode 100644
index 000000000000..c123395c0040
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/dir_windows.go
@@ -0,0 +1,46 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+)
+
+// OpenDir opens a directory in windows with write access for syncing.
+func OpenDir(path string) (*os.File, error) {
+ fd, err := openDir(path)
+ if err != nil {
+ return nil, err
+ }
+ return os.NewFile(uintptr(fd), path), nil
+}
+
+func openDir(path string) (fd syscall.Handle, err error) {
+ if len(path) == 0 {
+ return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
+ }
+ pathp, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return syscall.InvalidHandle, err
+ }
+ access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
+ sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
+ createmode := uint32(syscall.OPEN_EXISTING)
+ fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
+ return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go b/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go
new file mode 100644
index 000000000000..69dde5a7dd8e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package fileutil implements utility functions related to files and paths.
+package fileutil
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go b/vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go
new file mode 100644
index 000000000000..5d9fb5303955
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/fileutil.go
@@ -0,0 +1,104 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/coreos/pkg/capnslog"
+)
+
+const (
+ // PrivateFileMode grants owner to read/write a file.
+ PrivateFileMode = 0600
+ // PrivateDirMode grants owner to make/remove files inside the directory.
+ PrivateDirMode = 0700
+)
+
+var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "pkg/fileutil")
+
+// IsDirWriteable checks if dir is writable by writing to and then removing
+// a file in dir. It returns nil if dir is writable.
+func IsDirWriteable(dir string) error {
+ f := filepath.Join(dir, ".touch")
+ if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
+ return err
+ }
+ return os.Remove(f)
+}
+
+// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
+// does not exist. TouchDirAll also ensures the given directory is writable.
+func TouchDirAll(dir string) error {
+ // If path is already a directory, MkdirAll does nothing
+ // and returns nil.
+ err := os.MkdirAll(dir, PrivateDirMode)
+ if err != nil {
+ // if mkdirAll("a/text") and "text" is not
+ // a directory, this will return syscall.ENOTDIR
+ return err
+ }
+ return IsDirWriteable(dir)
+}
+
+// CreateDirAll is similar to TouchDirAll but returns an error
+// if the deepest directory is not empty.
+func CreateDirAll(dir string) error {
+ err := TouchDirAll(dir)
+ if err == nil {
+ var ns []string
+ ns, err = ReadDir(dir)
+ if err != nil {
+ return err
+ }
+ if len(ns) != 0 {
+ err = fmt.Errorf("expected %q to be empty, got %q", dir, ns)
+ }
+ }
+ return err
+}
+
+// Exist returns true if a file or directory exists.
+func Exist(name string) bool {
+ _, err := os.Stat(name)
+ return err == nil
+}
+
+// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. It may
+// temporarily shorten the length of the file.
+func ZeroToEnd(f *os.File) error {
+ // TODO: support FALLOC_FL_ZERO_RANGE
+ off, err := f.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+ lenf, lerr := f.Seek(0, io.SeekEnd)
+ if lerr != nil {
+ return lerr
+ }
+ if err = f.Truncate(off); err != nil {
+ return err
+ }
+ // make sure blocks remain allocated
+ if err = Preallocate(f, lenf, true); err != nil {
+ return err
+ }
+ _, err = f.Seek(off, io.SeekStart)
+ return err
+}
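
A sketch of the usual call pattern, with a hypothetical data directory: TouchDirAll both creates the directory chain and verifies it is writable, so one call covers both checks.

dataDir := "/var/lib/example" // hypothetical path
if err := fileutil.TouchDirAll(dataDir); err != nil {
	log.Fatalf("cannot create or write to %s: %v", dataDir, err)
}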
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock.go
new file mode 100644
index 000000000000..338627f43c88
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock.go
@@ -0,0 +1,26 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "errors"
+ "os"
+)
+
+var (
+ ErrLocked = errors.New("fileutil: file already locked")
+)
+
+type LockedFile struct{ *os.File }
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go
new file mode 100644
index 000000000000..542550bc8a96
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_flock.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows,!plan9,!solaris
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+)
+
+func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := os.OpenFile(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
+ f.Close()
+ if err == syscall.EWOULDBLOCK {
+ err = ErrLocked
+ }
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
+
+func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := os.OpenFile(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil {
+ f.Close()
+ return nil, err
+ }
+ return &LockedFile{f}, err
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go
new file mode 100644
index 000000000000..b0abc98eeb00
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_linux.go
@@ -0,0 +1,97 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package fileutil
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "syscall"
+)
+
+// This used to call syscall.Flock() but that call fails with EBADF on NFS.
+// An alternative is lockf() which works on NFS but that call lets a process lock
+// the same file twice. Instead, use Linux's non-standard open file descriptor
+// locks which will block if the process already holds the file lock.
+//
+// constants from /usr/include/bits/fcntl-linux.h
+const (
+ F_OFD_GETLK = 37
+ F_OFD_SETLK = 37
+ F_OFD_SETLKW = 38
+)
+
+var (
+ wrlck = syscall.Flock_t{
+ Type: syscall.F_WRLCK,
+ Whence: int16(io.SeekStart),
+ Start: 0,
+ Len: 0,
+ }
+
+ linuxTryLockFile = flockTryLockFile
+ linuxLockFile = flockLockFile
+)
+
+func init() {
+ // use open file descriptor locks if the system supports it
+ getlk := syscall.Flock_t{Type: syscall.F_RDLCK}
+ if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil {
+ linuxTryLockFile = ofdTryLockFile
+ linuxLockFile = ofdLockFile
+ }
+}
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ return linuxTryLockFile(path, flag, perm)
+}
+
+func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := os.OpenFile(path, flag, perm)
+ if err != nil {
+ return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%v)", path, err)
+ }
+
+ flock := wrlck
+ if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil {
+ f.Close()
+ if err == syscall.EWOULDBLOCK {
+ err = ErrLocked
+ }
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ return linuxLockFile(path, flag, perm)
+}
+
+func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := os.OpenFile(path, flag, perm)
+ if err != nil {
+ return nil, fmt.Errorf("ofdLockFile failed to open %q (%v)", path, err)
+ }
+
+ flock := wrlck
+ err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock)
+ if err != nil {
+ f.Close()
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
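
A sketch of the non-blocking pattern TryLockFile supports (the lock path is illustrative); closing the returned LockedFile releases the lock:

l, err := fileutil.TryLockFile("/tmp/example.lock", os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
if err == fileutil.ErrLocked {
	log.Println("lock is held by another process; try again later")
	return
} else if err != nil {
	log.Fatal(err)
}
defer l.Close() // releases the lock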
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go
new file mode 100644
index 000000000000..fee6a7c8f466
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_plan9.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
+ return nil, err
+ }
+ f, err := os.OpenFile(path, flag, perm)
+ if err != nil {
+ return nil, ErrLocked
+ }
+ return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
+ return nil, err
+ }
+ for {
+ f, err := os.OpenFile(path, flag, perm)
+ if err == nil {
+ return &LockedFile{f}, nil
+ }
+ time.Sleep(10 * time.Millisecond)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go
new file mode 100644
index 000000000000..352ca5590d13
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_solaris.go
@@ -0,0 +1,62 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build solaris
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Pid = 0
+ lock.Type = syscall.F_WRLCK
+ lock.Whence = 0
+ f, err := os.OpenFile(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil {
+ f.Close()
+ if err == syscall.EAGAIN {
+ err = ErrLocked
+ }
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Pid = 0
+ lock.Type = syscall.F_WRLCK
+ lock.Whence = 0
+ f, err := os.OpenFile(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil {
+ f.Close()
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go
new file mode 100644
index 000000000000..ed01164de6e4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_unix.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows,!plan9,!solaris,!linux
+
+package fileutil
+
+import (
+ "os"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ return flockTryLockFile(path, flag, perm)
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ return flockLockFile(path, flag, perm)
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go
new file mode 100644
index 000000000000..b1817230a3cf
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/lock_windows.go
@@ -0,0 +1,125 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows
+
+package fileutil
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procLockFileEx = modkernel32.NewProc("LockFileEx")
+
+ errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.")
+)
+
+const (
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
+ LOCKFILE_EXCLUSIVE_LOCK = 2
+ LOCKFILE_FAIL_IMMEDIATELY = 1
+
+ // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
+ errLockViolation syscall.Errno = 0x21
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := open(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil {
+ f.Close()
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := open(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil {
+ f.Close()
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
+
+func open(path string, flag int, perm os.FileMode) (*os.File, error) {
+ if path == "" {
+ return nil, fmt.Errorf("cannot open empty filename")
+ }
+ var access uint32
+ switch flag {
+ case syscall.O_RDONLY:
+ access = syscall.GENERIC_READ
+ case syscall.O_WRONLY:
+ access = syscall.GENERIC_WRITE
+ case syscall.O_RDWR:
+ access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
+ case syscall.O_WRONLY | syscall.O_CREAT:
+ access = syscall.GENERIC_ALL
+ default:
+ panic(fmt.Errorf("flag %v is not supported", flag))
+ }
+ fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]),
+ access,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+ nil,
+ syscall.OPEN_ALWAYS,
+ syscall.FILE_ATTRIBUTE_NORMAL,
+ 0)
+ if err != nil {
+ return nil, err
+ }
+ return os.NewFile(uintptr(fd), path), nil
+}
+
+func lockFile(fd syscall.Handle, flags uint32) error {
+ var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK
+ flag |= flags
+ if fd == syscall.InvalidHandle {
+ return nil
+ }
+ err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{})
+ if err == nil {
+ return nil
+ } else if err.Error() == errLocked.Error() {
+ return ErrLocked
+ } else if err != errLockViolation {
+ return err
+ }
+ return nil
+}
+
+func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
+ var reserved uint32 = 0
+ r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
+ if r1 == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return err
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go
new file mode 100644
index 000000000000..c747b7cf81f9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate.go
@@ -0,0 +1,54 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "io"
+ "os"
+)
+
+// Preallocate tries to allocate the space for the given
+// file. This operation is only supported on linux by a
+// few filesystems (btrfs, ext4, etc.).
+// If the operation is unsupported, no error will be returned.
+// Otherwise, the error encountered will be returned.
+func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
+ if sizeInBytes == 0 {
+ // fallocate will return EINVAL if length is 0; skip
+ return nil
+ }
+ if extendFile {
+ return preallocExtend(f, sizeInBytes)
+ }
+ return preallocFixed(f, sizeInBytes)
+}
+
+func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
+ curOff, err := f.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+ size, err := f.Seek(sizeInBytes, io.SeekEnd)
+ if err != nil {
+ return err
+ }
+ if _, err = f.Seek(curOff, io.SeekStart); err != nil {
+ return err
+ }
+ if sizeInBytes > size {
+ return nil
+ }
+ return f.Truncate(sizeInBytes)
+}
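
A sketch of reserving space before heavy appends, with an illustrative path and size; extendFile=true grows the file to the target size:

f, err := os.Create("/tmp/example.dat") // hypothetical file
if err != nil {
	log.Fatal(err)
}
// Reserve 64 MiB up front; on filesystems without fallocate support
// this falls back to a seek-and-truncate extension instead of erroring.
if err := fileutil.Preallocate(f, 64*1024*1024, true); err != nil {
	log.Fatal(err)
}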
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go
new file mode 100644
index 000000000000..5a6dccfa796f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_darwin.go
@@ -0,0 +1,65 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+ if err := preallocFixed(f, sizeInBytes); err != nil {
+ return err
+ }
+ return preallocExtendTrunc(f, sizeInBytes)
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error {
+ // allocate all requested space or no space at all
+ // TODO: allocate contiguous space on disk with F_ALLOCATECONTIG flag
+ fstore := &syscall.Fstore_t{
+ Flags: syscall.F_ALLOCATEALL,
+ Posmode: syscall.F_PEOFPOSMODE,
+ Length: sizeInBytes}
+ p := unsafe.Pointer(fstore)
+ _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p))
+ if errno == 0 || errno == syscall.ENOTSUP {
+ return nil
+ }
+
+ // wrong argument to fallocate syscall
+ if errno == syscall.EINVAL {
+ // filesystem "st_blocks" are allocated in the units of
+ // "Allocation Block Size" (run "diskutil info /" command)
+ var stat syscall.Stat_t
+ syscall.Fstat(int(f.Fd()), &stat)
+
+ // syscall.Statfs_t.Bsize is "optimal transfer block size"
+ // and contains the matching 4096 value when the latest OS X kernel
+ // supports a 4,096-byte filesystem block size
+ var statfs syscall.Statfs_t
+ syscall.Fstatfs(int(f.Fd()), &statfs)
+ blockSize := int64(statfs.Bsize)
+
+ if stat.Blocks*blockSize >= sizeInBytes {
+ // enough blocks are already allocated
+ return nil
+ }
+ }
+ return errno
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go
new file mode 100644
index 000000000000..50bd84f02ada
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unix.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+)
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+ // use mode = 0 to change size
+ err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
+ if err != nil {
+ errno, ok := err.(syscall.Errno)
+ // not supported; fallback
+ // fallocate EINTRs frequently in some environments; fallback
+ if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
+ return preallocExtendTrunc(f, sizeInBytes)
+ }
+ }
+ return err
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error {
+ // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE
+ err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes)
+ if err != nil {
+ errno, ok := err.(syscall.Errno)
+ // treat not supported as nil error
+ if ok && errno == syscall.ENOTSUP {
+ return nil
+ }
+ }
+ return err
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go
new file mode 100644
index 000000000000..162fbc5f7826
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/preallocate_unsupported.go
@@ -0,0 +1,25 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux,!darwin
+
+package fileutil
+
+import "os"
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+ return preallocExtendTrunc(f, sizeInBytes)
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error { return nil }
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/purge.go b/vendor/go.etcd.io/etcd/pkg/fileutil/purge.go
new file mode 100644
index 000000000000..d116f340b6f4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/purge.go
@@ -0,0 +1,98 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "go.uber.org/zap"
+)
+
+func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
+ return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil)
+}
+
+func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) {
+ doneC := make(chan struct{})
+ errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC)
+ return doneC, errC
+}
+
+// purgeFile is the internal implementation of PurgeFile; it posts the names
+// of purged files to purgec when purgec is non-nil.
+// If donec is non-nil, the function closes it to signal its exit.
+func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error {
+ errC := make(chan error, 1)
+ go func() {
+ if donec != nil {
+ defer close(donec)
+ }
+ for {
+ fnames, err := ReadDir(dirname)
+ if err != nil {
+ errC <- err
+ return
+ }
+ newfnames := make([]string, 0)
+ for _, fname := range fnames {
+ if strings.HasSuffix(fname, suffix) {
+ newfnames = append(newfnames, fname)
+ }
+ }
+ sort.Strings(newfnames)
+ fnames = newfnames
+ for len(newfnames) > int(max) {
+ f := filepath.Join(dirname, newfnames[0])
+ l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
+ if err != nil {
+ break
+ }
+ if err = os.Remove(f); err != nil {
+ errC <- err
+ return
+ }
+ if err = l.Close(); err != nil {
+ if lg != nil {
+ lg.Warn("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
+ } else {
+ plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
+ }
+ errC <- err
+ return
+ }
+ if lg != nil {
+ lg.Info("purged", zap.String("path", f))
+ } else {
+ plog.Infof("purged file %s successfully", f)
+ }
+ newfnames = newfnames[1:]
+ }
+ if purgec != nil {
+ for i := 0; i < len(fnames)-len(newfnames); i++ {
+ purgec <- fnames[i]
+ }
+ }
+ select {
+ case <-time.After(interval):
+ case <-stop:
+ return
+ }
+ }
+ }()
+ return errC
+}
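
As a usage sketch (the directory path, suffix, retention count, and interval below are illustrative), PurgeFile is typically started once and left running until shutdown; errors surface on the returned channel:

package main

import (
	"time"

	"go.etcd.io/etcd/pkg/fileutil"
	"go.uber.org/zap"
)

func main() {
	lg, _ := zap.NewProduction()
	stop := make(chan struct{})
	// Retain at most 5 files ending in ".wal" under /var/lib/demo/wal,
	// re-checking every 30 seconds until stop is closed.
	errC := fileutil.PurgeFile(lg, "/var/lib/demo/wal", ".wal", 5, 30*time.Second, stop)
	select {
	case err := <-errC:
		lg.Error("purge loop exited", zap.Error(err))
	case <-time.After(time.Minute):
		close(stop) // normal shutdown path
	}
}
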
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go b/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go
new file mode 100644
index 000000000000..2eeaa89bc044
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/read_dir.go
@@ -0,0 +1,70 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "os"
+ "path/filepath"
+ "sort"
+)
+
+// ReadDirOp represents a read-directory operation.
+type ReadDirOp struct {
+ ext string
+}
+
+// ReadDirOption configures a read-directory operation.
+type ReadDirOption func(*ReadDirOp)
+
+// WithExt filters file names by their extensions.
+// (e.g. WithExt(".wal") to list only WAL files)
+func WithExt(ext string) ReadDirOption {
+ return func(op *ReadDirOp) { op.ext = ext }
+}
+
+func (op *ReadDirOp) applyOpts(opts []ReadDirOption) {
+ for _, opt := range opts {
+ opt(op)
+ }
+}
+
+// ReadDir returns the filenames in the given directory in sorted order.
+func ReadDir(d string, opts ...ReadDirOption) ([]string, error) {
+ op := &ReadDirOp{}
+ op.applyOpts(opts)
+
+ dir, err := os.Open(d)
+ if err != nil {
+ return nil, err
+ }
+ defer dir.Close()
+
+ names, err := dir.Readdirnames(-1)
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+
+ if op.ext != "" {
+ tss := make([]string, 0)
+ for _, v := range names {
+ if filepath.Ext(v) == op.ext {
+ tss = append(tss, v)
+ }
+ }
+ names = tss
+ }
+ return names, nil
+}
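
A brief sketch of how the functional option composes (the directory name is illustrative): without WithExt all names are returned, with it only matching extensions survive, in sorted order either way.

package main

import (
	"fmt"
	"log"

	"go.etcd.io/etcd/pkg/fileutil"
)

func main() {
	// List only the ".wal" files in the directory, sorted by name.
	names, err := fileutil.ReadDir("/var/lib/demo/wal", fileutil.WithExt(".wal"))
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range names {
		fmt.Println(name)
	}
}
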
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/sync.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync.go
new file mode 100644
index 000000000000..54dd41f4f351
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/sync.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux,!darwin
+
+package fileutil
+
+import "os"
+
+// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform.
+func Fsync(f *os.File) error {
+ return f.Sync()
+}
+
+// Fdatasync is a wrapper around file.Sync(). Special handling is needed on the linux platform.
+func Fdatasync(f *os.File) error {
+ return f.Sync()
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go
new file mode 100644
index 000000000000..c2f39bf204d2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_darwin.go
@@ -0,0 +1,40 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+)
+
+// Fsync on HFS/OSX flushes the data on to the physical drive but the drive
+// may not write it to the persistent media for quite sometime and it may be
+// written in out-of-order sequence. Using F_FULLFSYNC ensures that the
+// physical drive's buffer will also get flushed to the media.
+func Fsync(f *os.File) error {
+ _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0))
+ if errno == 0 {
+ return nil
+ }
+ return errno
+}
+
+// Fdatasync on darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence
+// on physical drive media.
+func Fdatasync(f *os.File) error {
+ return Fsync(f)
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go
new file mode 100644
index 000000000000..1bbced915e9b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/fileutil/sync_linux.go
@@ -0,0 +1,34 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+)
+
+// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform.
+func Fsync(f *os.File) error {
+ return f.Sync()
+}
+
+// Fdatasync is similar to fsync(), but does not flush modified metadata
+// unless that metadata is needed in order to allow a subsequent data retrieval
+// to be correctly handled.
+func Fdatasync(f *os.File) error {
+ return syscall.Fdatasync(int(f.Fd()))
+}
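
A sketch of the intended call pattern (the file name and payload are illustrative): append data, then call Fdatasync before acknowledging the write.

package main

import (
	"log"
	"os"

	"go.etcd.io/etcd/pkg/fileutil"
)

func main() {
	f, err := os.OpenFile("demo.wal", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := f.Write([]byte("record")); err != nil {
		log.Fatal(err)
	}
	// On Linux this skips flushing metadata that data retrieval does not
	// need; on darwin it issues F_FULLFSYNC; elsewhere it is f.Sync().
	if err := fileutil.Fdatasync(f); err != nil {
		log.Fatal(err)
	}
}
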
diff --git a/vendor/go.etcd.io/etcd/pkg/flags/flag.go b/vendor/go.etcd.io/etcd/pkg/flags/flag.go
new file mode 100644
index 000000000000..215902cf8f31
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/flags/flag.go
@@ -0,0 +1,121 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package flags implements command-line flag parsing.
+package flags
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/coreos/pkg/capnslog"
+ "github.com/spf13/pflag"
+)
+
+var plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "pkg/flags")
+
+// SetFlagsFromEnv parses all registered flags in the given flagset,
+// and if they are not already set it attempts to set their values from
+// environment variables. Environment variables take the name of the flag but
+// are UPPERCASE, have the given prefix and any dashes are replaced by
+// underscores - for example: some-flag => ETCD_SOME_FLAG
+func SetFlagsFromEnv(prefix string, fs *flag.FlagSet) error {
+ var err error
+ alreadySet := make(map[string]bool)
+ fs.Visit(func(f *flag.Flag) {
+ alreadySet[FlagToEnv(prefix, f.Name)] = true
+ })
+ usedEnvKey := make(map[string]bool)
+ fs.VisitAll(func(f *flag.Flag) {
+ if serr := setFlagFromEnv(fs, prefix, f.Name, usedEnvKey, alreadySet, true); serr != nil {
+ err = serr
+ }
+ })
+ verifyEnv(prefix, usedEnvKey, alreadySet)
+ return err
+}
+
+// SetPflagsFromEnv is similar to SetFlagsFromEnv. However, the accepted flagset type is pflag.FlagSet
+// and it does not do any logging.
+func SetPflagsFromEnv(prefix string, fs *pflag.FlagSet) error {
+ var err error
+ alreadySet := make(map[string]bool)
+ usedEnvKey := make(map[string]bool)
+ fs.VisitAll(func(f *pflag.Flag) {
+ if f.Changed {
+ alreadySet[FlagToEnv(prefix, f.Name)] = true
+ }
+ if serr := setFlagFromEnv(fs, prefix, f.Name, usedEnvKey, alreadySet, false); serr != nil {
+ err = serr
+ }
+ })
+ verifyEnv(prefix, usedEnvKey, alreadySet)
+ return err
+}
+
+// FlagToEnv converts flag string to upper-case environment variable key string.
+func FlagToEnv(prefix, name string) string {
+ return prefix + "_" + strings.ToUpper(strings.Replace(name, "-", "_", -1))
+}
+
+func verifyEnv(prefix string, usedEnvKey, alreadySet map[string]bool) {
+ for _, env := range os.Environ() {
+ kv := strings.SplitN(env, "=", 2)
+ if len(kv) != 2 {
+ plog.Warningf("found invalid env %s", env)
+ }
+ if usedEnvKey[kv[0]] {
+ continue
+ }
+ if alreadySet[kv[0]] {
+ plog.Fatalf("conflicting environment variable %q is shadowed by corresponding command-line flag (either unset environment variable or disable flag)", kv[0])
+ }
+ if strings.HasPrefix(env, prefix+"_") {
+ plog.Warningf("unrecognized environment variable %s", env)
+ }
+ }
+}
+
+type flagSetter interface {
+ Set(fk string, fv string) error
+}
+
+func setFlagFromEnv(fs flagSetter, prefix, fname string, usedEnvKey, alreadySet map[string]bool, log bool) error {
+ key := FlagToEnv(prefix, fname)
+ if !alreadySet[key] {
+ val := os.Getenv(key)
+ if val != "" {
+ usedEnvKey[key] = true
+ if serr := fs.Set(fname, val); serr != nil {
+ return fmt.Errorf("invalid value %q for %s: %v", val, key, serr)
+ }
+ if log {
+ plog.Infof("recognized and used environment variable %s=%s", key, val)
+ }
+ }
+ }
+ return nil
+}
+
+func IsSet(fs *flag.FlagSet, name string) bool {
+ set := false
+ fs.Visit(func(f *flag.Flag) {
+ if f.Name == name {
+ set = true
+ }
+ })
+ return set
+}
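
A sketch of the env-to-flag mapping (the prefix and flag name are illustrative): a flag left unset on the command line picks up its value from the prefixed, upper-cased, underscore-separated environment variable.

package main

import (
	"flag"
	"fmt"
	"os"

	"go.etcd.io/etcd/pkg/flags"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ExitOnError)
	name := fs.String("some-flag", "default", "demonstration flag")
	fs.Parse(os.Args[1:])
	// DEMO_SOME_FLAG fills in some-flag only when it was not given on the CLI.
	if err := flags.SetFlagsFromEnv("DEMO", fs); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(*name)
}
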
diff --git a/vendor/go.etcd.io/etcd/pkg/flags/ignored.go b/vendor/go.etcd.io/etcd/pkg/flags/ignored.go
new file mode 100644
index 000000000000..9953049000ff
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/flags/ignored.go
@@ -0,0 +1,36 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+// IgnoredFlag encapsulates a flag that may have been previously valid but is
+// now ignored. If an IgnoredFlag is set, a warning is printed and
+// operation continues.
+type IgnoredFlag struct {
+ Name string
+}
+
+// IsBoolFlag is defined to allow the flag to be given without an argument.
+func (f *IgnoredFlag) IsBoolFlag() bool {
+ return true
+}
+
+func (f *IgnoredFlag) Set(s string) error {
+ plog.Warningf(`flag "-%s" is no longer supported - ignoring.`, f.Name)
+ return nil
+}
+
+func (f *IgnoredFlag) String() string {
+ return ""
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/flags/selective_string.go b/vendor/go.etcd.io/etcd/pkg/flags/selective_string.go
new file mode 100644
index 000000000000..4b90fbf4b492
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/flags/selective_string.go
@@ -0,0 +1,114 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// SelectiveStringValue implements the flag.Value interface.
+type SelectiveStringValue struct {
+ v string
+ valids map[string]struct{}
+}
+
+// Set verifies the argument to be a valid member of the allowed values
+// before setting the underlying flag value.
+func (ss *SelectiveStringValue) Set(s string) error {
+ if _, ok := ss.valids[s]; ok {
+ ss.v = s
+ return nil
+ }
+ return errors.New("invalid value")
+}
+
+// String returns the set value (if any) of the SelectiveStringValue
+func (ss *SelectiveStringValue) String() string {
+ return ss.v
+}
+
+// Valids returns the list of valid strings.
+func (ss *SelectiveStringValue) Valids() []string {
+ s := make([]string, 0, len(ss.valids))
+ for k := range ss.valids {
+ s = append(s, k)
+ }
+ sort.Strings(s)
+ return s
+}
+
+// NewSelectiveStringValue creates a new string flag
+// for which any one of the given strings is a valid value,
+// and any other value is an error.
+//
+// valids[0] will be the default value. Caller must ensure
+// len(valids) != 0 or it will panic.
+func NewSelectiveStringValue(valids ...string) *SelectiveStringValue {
+ vm := make(map[string]struct{})
+ for _, v := range valids {
+ vm[v] = struct{}{}
+ }
+ return &SelectiveStringValue{valids: vm, v: valids[0]}
+}
+
+// SelectiveStringsValue implements the flag.Value interface.
+type SelectiveStringsValue struct {
+ vs []string
+ valids map[string]struct{}
+}
+
+// Set verifies the argument to be a valid member of the allowed values
+// before setting the underlying flag value.
+func (ss *SelectiveStringsValue) Set(s string) error {
+ vs := strings.Split(s, ",")
+ for i := range vs {
+ if _, ok := ss.valids[vs[i]]; ok {
+ ss.vs = append(ss.vs, vs[i])
+ } else {
+ return fmt.Errorf("invalid value %q", vs[i])
+ }
+ }
+ sort.Strings(ss.vs)
+ return nil
+}
+
+// String returns the set value (if any) of the SelectiveStringsValue.
+func (ss *SelectiveStringsValue) String() string {
+ return strings.Join(ss.vs, ",")
+}
+
+// Valids returns the list of valid strings.
+func (ss *SelectiveStringsValue) Valids() []string {
+ s := make([]string, 0, len(ss.valids))
+ for k := range ss.valids {
+ s = append(s, k)
+ }
+ sort.Strings(s)
+ return s
+}
+
+// NewSelectiveStringsValue creates a new string slice flag
+// for which any one of the given strings is a valid value,
+// and any other value is an error.
+func NewSelectiveStringsValue(valids ...string) *SelectiveStringsValue {
+ vm := make(map[string]struct{})
+ for _, v := range valids {
+ vm[v] = struct{}{}
+ }
+ return &SelectiveStringsValue{valids: vm, vs: []string{}}
+}
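
A sketch of wiring a SelectiveStringValue into a flag set (the flag name and values are illustrative); the first value passed to the constructor is the default:

package main

import (
	"flag"
	"fmt"

	"go.etcd.io/etcd/pkg/flags"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	mode := flags.NewSelectiveStringValue("off", "readonly", "on")
	fs.Var(mode, "proxy", "proxy mode: off, readonly, or on")
	if err := fs.Parse([]string{"-proxy", "readonly"}); err != nil {
		fmt.Println(err) // any other value is rejected by Set
		return
	}
	fmt.Println(mode.String()) // readonly
	fmt.Println(mode.Valids()) // [off on readonly]
}
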
diff --git a/vendor/go.etcd.io/etcd/pkg/flags/strings.go b/vendor/go.etcd.io/etcd/pkg/flags/strings.go
new file mode 100644
index 000000000000..3e47fb38e193
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/flags/strings.go
@@ -0,0 +1,52 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+ "flag"
+ "sort"
+ "strings"
+)
+
+// StringsValue wraps "sort.StringSlice".
+type StringsValue sort.StringSlice
+
+// Set parses a command line set of strings, separated by comma.
+// Implements "flag.Value" interface.
+func (ss *StringsValue) Set(s string) error {
+ *ss = strings.Split(s, ",")
+ return nil
+}
+
+// String implements "flag.Value" interface.
+func (ss *StringsValue) String() string { return strings.Join(*ss, ",") }
+
+// NewStringsValue implements string slice as "flag.Value" interface.
+// Given value is to be separated by comma.
+func NewStringsValue(s string) (ss *StringsValue) {
+ if s == "" {
+ return &StringsValue{}
+ }
+ ss = new(StringsValue)
+ if err := ss.Set(s); err != nil {
+ plog.Panicf("new StringsValue should never fail: %v", err)
+ }
+ return ss
+}
+
+// StringsFromFlag returns a string slice from the flag.
+func StringsFromFlag(fs *flag.FlagSet, flagName string) []string {
+ return []string(*fs.Lookup(flagName).Value.(*StringsValue))
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/flags/unique_strings.go b/vendor/go.etcd.io/etcd/pkg/flags/unique_strings.go
new file mode 100644
index 000000000000..e220ee07a730
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/flags/unique_strings.go
@@ -0,0 +1,76 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+ "flag"
+ "sort"
+ "strings"
+)
+
+// UniqueStringsValue wraps a list of unique strings.
+// The values are set in order.
+type UniqueStringsValue struct {
+ Values map[string]struct{}
+}
+
+// Set parses a command line set of strings, separated by comma.
+// Implements "flag.Value" interface.
+// The values are set in order.
+func (us *UniqueStringsValue) Set(s string) error {
+ us.Values = make(map[string]struct{})
+ for _, v := range strings.Split(s, ",") {
+ us.Values[v] = struct{}{}
+ }
+ return nil
+}
+
+// String implements "flag.Value" interface.
+func (us *UniqueStringsValue) String() string {
+ return strings.Join(us.stringSlice(), ",")
+}
+
+func (us *UniqueStringsValue) stringSlice() []string {
+ ss := make([]string, 0, len(us.Values))
+ for v := range us.Values {
+ ss = append(ss, v)
+ }
+ sort.Strings(ss)
+ return ss
+}
+
+// NewUniqueStringsValue implements string slice as "flag.Value" interface.
+// Given value is to be separated by comma.
+// The values are set in order.
+func NewUniqueStringsValue(s string) (us *UniqueStringsValue) {
+ us = &UniqueStringsValue{Values: make(map[string]struct{})}
+ if s == "" {
+ return us
+ }
+ if err := us.Set(s); err != nil {
+ plog.Panicf("new UniqueStringsValue should never fail: %v", err)
+ }
+ return us
+}
+
+// UniqueStringsFromFlag returns a string slice from the flag.
+func UniqueStringsFromFlag(fs *flag.FlagSet, flagName string) []string {
+ return (*fs.Lookup(flagName).Value.(*UniqueStringsValue)).stringSlice()
+}
+
+// UniqueStringsMapFromFlag returns a map of strings from the flag.
+func UniqueStringsMapFromFlag(fs *flag.FlagSet, flagName string) map[string]struct{} {
+ return (*fs.Lookup(flagName).Value.(*UniqueStringsValue)).Values
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/flags/unique_urls.go b/vendor/go.etcd.io/etcd/pkg/flags/unique_urls.go
new file mode 100644
index 000000000000..9b4178c3a145
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/flags/unique_urls.go
@@ -0,0 +1,92 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+ "flag"
+ "net/url"
+ "sort"
+ "strings"
+
+ "go.etcd.io/etcd/pkg/types"
+)
+
+// UniqueURLs contains unique URLs
+// with non-URL exceptions.
+type UniqueURLs struct {
+ Values map[string]struct{}
+ uss []url.URL
+ Allowed map[string]struct{}
+}
+
+// Set parses a command line set of URLs formatted like:
+// http://127.0.0.1:2380,http://10.1.1.2:80
+// Implements "flag.Value" interface.
+func (us *UniqueURLs) Set(s string) error {
+ if _, ok := us.Values[s]; ok {
+ return nil
+ }
+ if _, ok := us.Allowed[s]; ok {
+ us.Values[s] = struct{}{}
+ return nil
+ }
+ ss, err := types.NewURLs(strings.Split(s, ","))
+ if err != nil {
+ return err
+ }
+ us.Values = make(map[string]struct{})
+ us.uss = make([]url.URL, 0)
+ for _, v := range ss {
+ us.Values[v.String()] = struct{}{}
+ us.uss = append(us.uss, v)
+ }
+ return nil
+}
+
+// String implements "flag.Value" interface.
+func (us *UniqueURLs) String() string {
+ all := make([]string, 0, len(us.Values))
+ for u := range us.Values {
+ all = append(all, u)
+ }
+ sort.Strings(all)
+ return strings.Join(all, ",")
+}
+
+// NewUniqueURLsWithExceptions implements "url.URL" slice as flag.Value interface.
+// Given value is to be separated by comma.
+func NewUniqueURLsWithExceptions(s string, exceptions ...string) *UniqueURLs {
+ us := &UniqueURLs{Values: make(map[string]struct{}), Allowed: make(map[string]struct{})}
+ for _, v := range exceptions {
+ us.Allowed[v] = struct{}{}
+ }
+ if s == "" {
+ return us
+ }
+ if err := us.Set(s); err != nil {
+ plog.Panicf("new UniqueURLs should never fail: %v", err)
+ }
+ return us
+}
+
+// UniqueURLsFromFlag returns the url.URL slice from the named flag.
+func UniqueURLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL {
+ return (*fs.Lookup(urlsFlagName).Value.(*UniqueURLs)).uss
+}
+
+// UniqueURLsMapFromFlag returns the map of URL strings from the named flag.
+func UniqueURLsMapFromFlag(fs *flag.FlagSet, urlsFlagName string) map[string]struct{} {
+ return (*fs.Lookup(urlsFlagName).Value.(*UniqueURLs)).Values
+}
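
A sketch of the exception mechanism (the flag name, URLs, and the "unix://" exception are illustrative): values in the allowed set bypass URL validation, everything else must parse as comma-separated URLs and is de-duplicated.

package main

import (
	"flag"
	"fmt"

	"go.etcd.io/etcd/pkg/flags"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	urls := flags.NewUniqueURLsWithExceptions("http://localhost:2379", "unix://")
	fs.Var(urls, "listen-client-urls", "client listen URLs")
	err := fs.Parse([]string{"-listen-client-urls", "http://10.0.0.1:2379,http://127.0.0.1:2379"})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(urls.String()) // de-duplicated, sorted, comma-joined
}
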
diff --git a/vendor/go.etcd.io/etcd/pkg/flags/urls.go b/vendor/go.etcd.io/etcd/pkg/flags/urls.go
new file mode 100644
index 000000000000..ca90970c2b91
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/flags/urls.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+ "flag"
+ "net/url"
+ "strings"
+
+ "go.etcd.io/etcd/pkg/types"
+)
+
+// URLsValue wraps "types.URLs".
+type URLsValue types.URLs
+
+// Set parses a command line set of URLs formatted like:
+// http://127.0.0.1:2380,http://10.1.1.2:80
+// Implements "flag.Value" interface.
+func (us *URLsValue) Set(s string) error {
+ ss, err := types.NewURLs(strings.Split(s, ","))
+ if err != nil {
+ return err
+ }
+ *us = URLsValue(ss)
+ return nil
+}
+
+// String implements "flag.Value" interface.
+func (us *URLsValue) String() string {
+ all := make([]string, len(*us))
+ for i, u := range *us {
+ all[i] = u.String()
+ }
+ return strings.Join(all, ",")
+}
+
+// NewURLsValue implements "url.URL" slice as flag.Value interface.
+// Given value is to be separated by comma.
+func NewURLsValue(s string) *URLsValue {
+ if s == "" {
+ return &URLsValue{}
+ }
+ v := &URLsValue{}
+ if err := v.Set(s); err != nil {
+ plog.Panicf("new URLsValue should never fail: %v", err)
+ }
+ return v
+}
+
+// URLsFromFlag returns the url.URL slice from the named flag.
+func URLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL {
+ return []url.URL(*fs.Lookup(urlsFlagName).Value.(*URLsValue))
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/httputil/httputil.go b/vendor/go.etcd.io/etcd/pkg/httputil/httputil.go
new file mode 100644
index 000000000000..3bf58a3a1dc1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/httputil/httputil.go
@@ -0,0 +1,50 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httputil provides HTTP utility functions.
+package httputil
+
+import (
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+)
+
+// GracefulClose drains http.Response.Body until it hits EOF
+// and closes it. This prevents the TCP/TLS connection from being
+// closed prematurely, keeping it available for reuse.
+// Borrowed from golang/net/context/ctxhttp/cancelreq.go.
+func GracefulClose(resp *http.Response) {
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+}
+
+// GetHostname returns the hostname from request Host field.
+// It returns empty string, if Host field contains invalid
+// value (e.g. "localhost:::" with too many colons).
+func GetHostname(req *http.Request) string {
+ if req == nil {
+ return ""
+ }
+ h, _, err := net.SplitHostPort(req.Host)
+ if err != nil {
+ return req.Host
+ }
+ return h
+}
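
A sketch of the intended pattern (the endpoint URL is illustrative): drain the body before closing so the underlying connection returns to the keep-alive pool.

package main

import (
	"fmt"
	"log"
	"net/http"

	"go.etcd.io/etcd/pkg/httputil"
)

func main() {
	resp, err := http.Get("http://127.0.0.1:2379/version")
	if err != nil {
		log.Fatal(err)
	}
	// Drain and close the body so the keep-alive connection is reusable.
	defer httputil.GracefulClose(resp)
	fmt.Println(resp.Status)
}
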
diff --git a/vendor/go.etcd.io/etcd/pkg/idutil/id.go b/vendor/go.etcd.io/etcd/pkg/idutil/id.go
new file mode 100644
index 000000000000..63a02cd73461
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/idutil/id.go
@@ -0,0 +1,75 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package idutil implements utility functions for generating unique,
+// randomized ids.
+package idutil
+
+import (
+ "math"
+ "sync/atomic"
+ "time"
+)
+
+const (
+ tsLen = 5 * 8
+ cntLen = 8
+ suffixLen = tsLen + cntLen
+)
+
+// Generator generates unique identifiers based on counters, timestamps, and
+// a node member ID.
+//
+// The initial id is in this format:
+// High order 2 bytes are from memberID, next 5 bytes are from timestamp,
+// and low order one byte is a counter.
+// | prefix | suffix |
+// | 2 bytes | 5 bytes | 1 byte |
+// | memberID | timestamp | cnt |
+//
+// The 5-byte timestamp differs across restarts provided the machine is
+// restarted more than 1 ms and less than roughly 35 years apart.
+//
+// It increments the suffix to generate the next id.
+// The count field may overflow into the timestamp field, which is intentional.
+// It helps to extend the event window to 2^56. This does not break the
+// uniqueness of ids generated after a restart, because etcd throughput is
+// well below 256 req/ms (250k reqs/second).
+type Generator struct {
+ // high order 2 bytes
+ prefix uint64
+ // low order 6 bytes
+ suffix uint64
+}
+
+func NewGenerator(memberID uint16, now time.Time) *Generator {
+ prefix := uint64(memberID) << suffixLen
+ unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond)
+ suffix := lowbit(unixMilli, tsLen) << cntLen
+ return &Generator{
+ prefix: prefix,
+ suffix: suffix,
+ }
+}
+
+// Next generates an id that is unique.
+func (g *Generator) Next() uint64 {
+ suffix := atomic.AddUint64(&g.suffix, 1)
+ id := g.prefix | lowbit(suffix, suffixLen)
+ return id
+}
+
+func lowbit(x uint64, n uint) uint64 {
+ return x & (math.MaxUint64 >> (64 - n))
+}
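
A sketch of generating ids (the member id is illustrative): the member id lands in the top two bytes, so ids from distinct members can never collide, and successive calls differ in the low-order counter bits.

package main

import (
	"fmt"
	"time"

	"go.etcd.io/etcd/pkg/idutil"
)

func main() {
	gen := idutil.NewGenerator(0x1234, time.Now())
	a, b := gen.Next(), gen.Next()
	// Both ids begin with 1234 in hex; b is a + 1 in the suffix.
	fmt.Printf("%016x\n%016x\n", a, b)
}
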
diff --git a/vendor/go.etcd.io/etcd/pkg/ioutil/pagewriter.go b/vendor/go.etcd.io/etcd/pkg/ioutil/pagewriter.go
new file mode 100644
index 000000000000..cf9a8dc664dc
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/ioutil/pagewriter.go
@@ -0,0 +1,117 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ioutil
+
+import (
+ "io"
+)
+
+var defaultBufferBytes = 128 * 1024
+
+// PageWriter implements the io.Writer interface so that writes are
+// issued either in page-aligned chunks or when the buffer is flushed.
+type PageWriter struct {
+ w io.Writer
+ // pageOffset tracks the page offset of the base of the buffer
+ pageOffset int
+ // pageBytes is the number of bytes per page
+ pageBytes int
+ // bufferedBytes counts the number of bytes pending for write in the buffer
+ bufferedBytes int
+ // buf holds the write buffer
+ buf []byte
+ // bufWatermarkBytes is the number of bytes the buffer can hold before it needs
+ // to be flushed. It is less than len(buf) so there is space for slack writes
+ // to bring the writer to page alignment.
+ bufWatermarkBytes int
+}
+
+// NewPageWriter creates a new PageWriter. pageBytes is the number of bytes
+// to write per page. pageOffset is the starting offset of io.Writer.
+func NewPageWriter(w io.Writer, pageBytes, pageOffset int) *PageWriter {
+ return &PageWriter{
+ w: w,
+ pageOffset: pageOffset,
+ pageBytes: pageBytes,
+ buf: make([]byte, defaultBufferBytes+pageBytes),
+ bufWatermarkBytes: defaultBufferBytes,
+ }
+}
+
+func (pw *PageWriter) Write(p []byte) (n int, err error) {
+ if len(p)+pw.bufferedBytes <= pw.bufWatermarkBytes {
+ // no overflow
+ copy(pw.buf[pw.bufferedBytes:], p)
+ pw.bufferedBytes += len(p)
+ return len(p), nil
+ }
+ // complete the slack page in the buffer if unaligned
+ slack := pw.pageBytes - ((pw.pageOffset + pw.bufferedBytes) % pw.pageBytes)
+ if slack != pw.pageBytes {
+ partial := slack > len(p)
+ if partial {
+ // not enough data to complete the slack page
+ slack = len(p)
+ }
+ // special case: writing to slack page in buffer
+ copy(pw.buf[pw.bufferedBytes:], p[:slack])
+ pw.bufferedBytes += slack
+ n = slack
+ p = p[slack:]
+ if partial {
+ // avoid forcing an unaligned flush
+ return n, nil
+ }
+ }
+ // buffer contents are now page-aligned; clear out
+ if err = pw.Flush(); err != nil {
+ return n, err
+ }
+ // directly write all complete pages without copying
+ if len(p) > pw.pageBytes {
+ pages := len(p) / pw.pageBytes
+ c, werr := pw.w.Write(p[:pages*pw.pageBytes])
+ n += c
+ if werr != nil {
+ return n, werr
+ }
+ p = p[pages*pw.pageBytes:]
+ }
+ // write remaining tail to buffer
+ c, werr := pw.Write(p)
+ n += c
+ return n, werr
+}
+
+// Flush flushes buffered data.
+func (pw *PageWriter) Flush() error {
+ _, err := pw.flush()
+ return err
+}
+
+// FlushN flushes buffered data and returns the number of written bytes.
+func (pw *PageWriter) FlushN() (int, error) {
+ return pw.flush()
+}
+
+func (pw *PageWriter) flush() (int, error) {
+ if pw.bufferedBytes == 0 {
+ return 0, nil
+ }
+ n, err := pw.w.Write(pw.buf[:pw.bufferedBytes])
+ pw.pageOffset = (pw.pageOffset + pw.bufferedBytes) % pw.pageBytes
+ pw.bufferedBytes = 0
+ return n, err
+}
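
A sketch of the page-aligned buffering (the page size and payload are illustrative): small writes accumulate in the buffer below the watermark, and Flush pushes whatever remains.

package main

import (
	"bytes"
	"fmt"
	"log"

	"go.etcd.io/etcd/pkg/ioutil"
)

func main() {
	var dst bytes.Buffer
	pw := ioutil.NewPageWriter(&dst, 4096, 0) // 4 KiB pages, starting offset 0
	if _, err := pw.Write(make([]byte, 10000)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(dst.Len()) // 0: still buffered, below the 128 KiB watermark
	if err := pw.Flush(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(dst.Len()) // 10000
}
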
diff --git a/vendor/go.etcd.io/etcd/pkg/ioutil/readcloser.go b/vendor/go.etcd.io/etcd/pkg/ioutil/readcloser.go
new file mode 100644
index 000000000000..d3efcfe3d5a6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/ioutil/readcloser.go
@@ -0,0 +1,66 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ioutil
+
+import (
+ "fmt"
+ "io"
+)
+
+// ReaderAndCloser implements io.ReadCloser interface by combining
+// reader and closer together.
+type ReaderAndCloser struct {
+ io.Reader
+ io.Closer
+}
+
+var (
+ ErrShortRead = fmt.Errorf("ioutil: short read")
+ ErrExpectEOF = fmt.Errorf("ioutil: expect EOF")
+)
+
+// NewExactReadCloser returns a ReadCloser that returns errors if the underlying
+// reader does not read back exactly the requested number of bytes.
+func NewExactReadCloser(rc io.ReadCloser, totalBytes int64) io.ReadCloser {
+ return &exactReadCloser{rc: rc, totalBytes: totalBytes}
+}
+
+type exactReadCloser struct {
+ rc io.ReadCloser
+ br int64
+ totalBytes int64
+}
+
+func (e *exactReadCloser) Read(p []byte) (int, error) {
+ n, err := e.rc.Read(p)
+ e.br += int64(n)
+ if e.br > e.totalBytes {
+ return 0, ErrExpectEOF
+ }
+ if e.br < e.totalBytes && n == 0 {
+ return 0, ErrShortRead
+ }
+ return n, err
+}
+
+func (e *exactReadCloser) Close() error {
+ if err := e.rc.Close(); err != nil {
+ return err
+ }
+ if e.br < e.totalBytes {
+ return ErrShortRead
+ }
+ return nil
+}
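
A sketch of the exact-length guard (the payload and expected length are illustrative):

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	etcdioutil "go.etcd.io/etcd/pkg/ioutil"
)

func main() {
	body := ioutil.NopCloser(strings.NewReader("hello"))
	// Expect exactly 5 bytes: extra data yields ErrExpectEOF on Read,
	// missing data yields ErrShortRead on Read or Close.
	rc := etcdioutil.NewExactReadCloser(body, 5)
	data, err := ioutil.ReadAll(rc)
	fmt.Println(string(data), err) // hello <nil>
	fmt.Println(rc.Close())        // <nil>
}
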
diff --git a/vendor/go.etcd.io/etcd/pkg/ioutil/reader.go b/vendor/go.etcd.io/etcd/pkg/ioutil/reader.go
new file mode 100644
index 000000000000..0703ed476d80
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/ioutil/reader.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ioutil implements I/O utility functions.
+package ioutil
+
+import "io"
+
+// NewLimitedBufferReader returns a reader that reads from the given reader
+// but limits the amount of data returned by each Read call to at most n bytes.
+func NewLimitedBufferReader(r io.Reader, n int) io.Reader {
+ return &limitedBufferReader{
+ r: r,
+ n: n,
+ }
+}
+
+type limitedBufferReader struct {
+ r io.Reader
+ n int
+}
+
+func (r *limitedBufferReader) Read(p []byte) (n int, err error) {
+ np := p
+ if len(np) > r.n {
+ np = np[:r.n]
+ }
+ return r.r.Read(np)
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/ioutil/util.go b/vendor/go.etcd.io/etcd/pkg/ioutil/util.go
new file mode 100644
index 000000000000..6a6746e0b56b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/ioutil/util.go
@@ -0,0 +1,43 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ioutil
+
+import (
+ "io"
+ "os"
+
+ "go.etcd.io/etcd/pkg/fileutil"
+)
+
+// WriteAndSyncFile behaves just like ioutil.WriteFile in the standard library,
+// but calls Sync before closing the file. WriteAndSyncFile guarantees the data
+// is synced if there is no error returned.
+func WriteAndSyncFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err == nil {
+ err = fileutil.Fsync(f)
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
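
A sketch of the call (the path and payload are illustrative):

package main

import (
	"log"

	"go.etcd.io/etcd/pkg/ioutil"
)

func main() {
	// Unlike plain ioutil.WriteFile, the data is fsynced before close,
	// so a clean return means the bytes are on stable storage.
	if err := ioutil.WriteAndSyncFile("/tmp/demo.snap", []byte("snapshot"), 0600); err != nil {
		log.Fatal(err)
	}
}
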
diff --git a/vendor/go.etcd.io/etcd/pkg/netutil/doc.go b/vendor/go.etcd.io/etcd/pkg/netutil/doc.go
new file mode 100644
index 000000000000..5d92d03a6ff5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/netutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package netutil implements network-related utility functions.
+package netutil
diff --git a/vendor/go.etcd.io/etcd/pkg/netutil/isolate_linux.go b/vendor/go.etcd.io/etcd/pkg/netutil/isolate_linux.go
new file mode 100644
index 000000000000..418580ac48de
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/netutil/isolate_linux.go
@@ -0,0 +1,82 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package netutil
+
+import (
+ "fmt"
+ "os/exec"
+)
+
+// DropPort drops all tcp packets that are received from the given port and sent to the given port.
+func DropPort(port int) error {
+ cmdStr := fmt.Sprintf("sudo iptables -A OUTPUT -p tcp --destination-port %d -j DROP", port)
+ if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil {
+ return err
+ }
+ cmdStr = fmt.Sprintf("sudo iptables -A INPUT -p tcp --destination-port %d -j DROP", port)
+ _, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
+ return err
+}
+
+// RecoverPort stops dropping tcp packets at given port.
+func RecoverPort(port int) error {
+ cmdStr := fmt.Sprintf("sudo iptables -D OUTPUT -p tcp --destination-port %d -j DROP", port)
+ if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil {
+ return err
+ }
+ cmdStr = fmt.Sprintf("sudo iptables -D INPUT -p tcp --destination-port %d -j DROP", port)
+ _, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
+ return err
+}
+
+// SetLatency adds latency in millisecond scale with random variations.
+func SetLatency(ms, rv int) error {
+ ifces, err := GetDefaultInterfaces()
+ if err != nil {
+ return err
+ }
+
+ if rv > ms {
+ rv = 1
+ }
+ for ifce := range ifces {
+ cmdStr := fmt.Sprintf("sudo tc qdisc add dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
+ _, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
+ if err != nil {
+ // the rule has already been added. Overwrite it.
+ cmdStr = fmt.Sprintf("sudo tc qdisc change dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
+ _, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// RemoveLatency resets latency configurations.
+func RemoveLatency() error {
+ ifces, err := GetDefaultInterfaces()
+ if err != nil {
+ return err
+ }
+ for ifce := range ifces {
+ _, err = exec.Command("/bin/sh", "-c", fmt.Sprintf("sudo tc qdisc del dev %s root netem", ifce)).Output()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
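
A sketch of how these fault-injection helpers are typically driven from a test (the port is illustrative; requires Linux, iptables, and sudo):

package main

import (
	"log"
	"time"

	"go.etcd.io/etcd/pkg/netutil"
)

func main() {
	// Black-hole peer traffic on 2380, then restore it.
	if err := netutil.DropPort(2380); err != nil {
		log.Fatal(err)
	}
	time.Sleep(10 * time.Second) // observe the cluster while partitioned
	if err := netutil.RecoverPort(2380); err != nil {
		log.Fatal(err)
	}
}
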
diff --git a/vendor/go.etcd.io/etcd/pkg/netutil/isolate_stub.go b/vendor/go.etcd.io/etcd/pkg/netutil/isolate_stub.go
new file mode 100644
index 000000000000..7f4c3e67c2ad
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/netutil/isolate_stub.go
@@ -0,0 +1,25 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux
+
+package netutil
+
+func DropPort(port int) error { return nil }
+
+func RecoverPort(port int) error { return nil }
+
+func SetLatency(ms, rv int) error { return nil }
+
+func RemoveLatency() error { return nil }
diff --git a/vendor/go.etcd.io/etcd/pkg/netutil/netutil.go b/vendor/go.etcd.io/etcd/pkg/netutil/netutil.go
new file mode 100644
index 000000000000..faef6466eeb5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/netutil/netutil.go
@@ -0,0 +1,213 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package netutil
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/url"
+ "reflect"
+ "sort"
+ "time"
+
+ "go.etcd.io/etcd/pkg/types"
+
+ "go.uber.org/zap"
+)
+
+// indirection for testing
+var resolveTCPAddr = resolveTCPAddrDefault
+
+const retryInterval = time.Second
+
+// taken from Go's ResolveTCPAddr code but uses a configurable ctx
+func resolveTCPAddrDefault(ctx context.Context, addr string) (*net.TCPAddr, error) {
+ host, port, serr := net.SplitHostPort(addr)
+ if serr != nil {
+ return nil, serr
+ }
+ portnum, perr := net.DefaultResolver.LookupPort(ctx, "tcp", port)
+ if perr != nil {
+ return nil, perr
+ }
+
+ var ips []net.IPAddr
+ if ip := net.ParseIP(host); ip != nil {
+ ips = []net.IPAddr{{IP: ip}}
+ } else {
+ // Try as a DNS name.
+ ipss, err := net.DefaultResolver.LookupIPAddr(ctx, host)
+ if err != nil {
+ return nil, err
+ }
+ ips = ipss
+ }
+ // randomize?
+ ip := ips[0]
+ return &net.TCPAddr{IP: ip.IP, Port: portnum, Zone: ip.Zone}, nil
+}
+
+// resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr.
+// resolveTCPAddrs returns a new set of url.URLs, in which all DNS hostnames
+// are resolved.
+func resolveTCPAddrs(ctx context.Context, lg *zap.Logger, urls [][]url.URL) ([][]url.URL, error) {
+ newurls := make([][]url.URL, 0)
+ for _, us := range urls {
+ nus := make([]url.URL, len(us))
+ for i, u := range us {
+ nu, err := url.Parse(u.String())
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse %q (%v)", u.String(), err)
+ }
+ nus[i] = *nu
+ }
+ for i, u := range nus {
+ h, err := resolveURL(ctx, lg, u)
+ if err != nil {
+ return nil, fmt.Errorf("failed to resolve %q (%v)", u.String(), err)
+ }
+ if h != "" {
+ nus[i].Host = h
+ }
+ }
+ newurls = append(newurls, nus)
+ }
+ return newurls, nil
+}
+
+func resolveURL(ctx context.Context, lg *zap.Logger, u url.URL) (string, error) {
+ if u.Scheme == "unix" || u.Scheme == "unixs" {
+ // unix sockets don't resolve over TCP
+ return "", nil
+ }
+ host, _, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ lg.Warn(
+ "failed to parse URL Host while resolving URL",
+ zap.String("url", u.String()),
+ zap.String("host", u.Host),
+ zap.Error(err),
+ )
+ return "", err
+ }
+ if host == "localhost" || net.ParseIP(host) != nil {
+ return "", nil
+ }
+ for ctx.Err() == nil {
+ tcpAddr, err := resolveTCPAddr(ctx, u.Host)
+ if err == nil {
+ lg.Info(
+ "resolved URL Host",
+ zap.String("url", u.String()),
+ zap.String("host", u.Host),
+ zap.String("resolved-addr", tcpAddr.String()),
+ )
+ return tcpAddr.String(), nil
+ }
+
+ lg.Warn(
+ "failed to resolve URL Host",
+ zap.String("url", u.String()),
+ zap.String("host", u.Host),
+ zap.Duration("retry-interval", retryInterval),
+ zap.Error(err),
+ )
+
+ select {
+ case <-ctx.Done():
+ lg.Warn(
+ "failed to resolve URL Host; returning",
+ zap.String("url", u.String()),
+ zap.String("host", u.Host),
+ zap.Duration("retry-interval", retryInterval),
+ zap.Error(err),
+ )
+ return "", err
+ case <-time.After(retryInterval):
+ }
+ }
+ return "", ctx.Err()
+}
+
+// urlsEqual checks the equality of two url.URL slices.
+// The check passes even if one URL uses a hostname and the other uses the
+// corresponding IP address.
+func urlsEqual(ctx context.Context, lg *zap.Logger, a []url.URL, b []url.URL) (bool, error) {
+ if len(a) != len(b) {
+ return false, fmt.Errorf("len(%q) != len(%q)", urlsToStrings(a), urlsToStrings(b))
+ }
+ urls, err := resolveTCPAddrs(ctx, lg, [][]url.URL{a, b})
+ if err != nil {
+ return false, err
+ }
+ preva, prevb := a, b
+ a, b = urls[0], urls[1]
+ sort.Sort(types.URLs(a))
+ sort.Sort(types.URLs(b))
+ for i := range a {
+ if !reflect.DeepEqual(a[i], b[i]) {
+ return false, fmt.Errorf("%q(resolved from %q) != %q(resolved from %q)",
+ a[i].String(), preva[i].String(),
+ b[i].String(), prevb[i].String(),
+ )
+ }
+ }
+ return true, nil
+}
+
+// URLStringsEqual returns "true" if the given URLs are valid
+// and resolve to the same IP addresses. Otherwise, it returns "false"
+// and an error, if any.
+func URLStringsEqual(ctx context.Context, lg *zap.Logger, a []string, b []string) (bool, error) {
+ if len(a) != len(b) {
+ return false, fmt.Errorf("len(%q) != len(%q)", a, b)
+ }
+ urlsA := make([]url.URL, 0)
+ for _, str := range a {
+ u, err := url.Parse(str)
+ if err != nil {
+ return false, fmt.Errorf("failed to parse %q", str)
+ }
+ urlsA = append(urlsA, *u)
+ }
+ urlsB := make([]url.URL, 0)
+ for _, str := range b {
+ u, err := url.Parse(str)
+ if err != nil {
+ return false, fmt.Errorf("failed to parse %q", str)
+ }
+ urlsB = append(urlsB, *u)
+ }
+ if lg == nil {
+ lg, _ = zap.NewProduction()
+ if lg == nil {
+ lg = zap.NewExample()
+ }
+ }
+ return urlsEqual(ctx, lg, urlsA, urlsB)
+}
+
+func urlsToStrings(us []url.URL) []string {
+ rs := make([]string, len(us))
+ for i := range us {
+ rs[i] = us[i].String()
+ }
+ return rs
+}
+
+func IsNetworkTimeoutError(err error) bool {
+ nerr, ok := err.(net.Error)
+ return ok && nerr.Timeout()
+}
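
A sketch of the comparison (the URLs are illustrative): IP-literal URLs compare directly, while hostname URLs are resolved before comparison, so a hostname list can equal an IP list.

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/pkg/netutil"
	"go.uber.org/zap"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	lg, _ := zap.NewProduction()
	ok, err := netutil.URLStringsEqual(ctx, lg,
		[]string{"https://127.0.0.1:2380"},
		[]string{"https://127.0.0.1:2380"})
	fmt.Println(ok, err) // true <nil>
}
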
diff --git a/vendor/go.etcd.io/etcd/pkg/netutil/routes.go b/vendor/go.etcd.io/etcd/pkg/netutil/routes.go
new file mode 100644
index 000000000000..3eb6a19ec844
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/netutil/routes.go
@@ -0,0 +1,33 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux
+
+package netutil
+
+import (
+ "fmt"
+ "runtime"
+)
+
+// GetDefaultHost fetches a resolvable name that corresponds
+// to the machine's default routable interface.
+func GetDefaultHost() (string, error) {
+ return "", fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
+}
+
+// GetDefaultInterfaces fetches the device name of default routable interface.
+func GetDefaultInterfaces() (map[string]uint8, error) {
+ return nil, fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/netutil/routes_linux.go b/vendor/go.etcd.io/etcd/pkg/netutil/routes_linux.go
new file mode 100644
index 000000000000..5118d3dacd25
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/netutil/routes_linux.go
@@ -0,0 +1,250 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package netutil
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "net"
+ "sort"
+ "syscall"
+
+ "go.etcd.io/etcd/pkg/cpuutil"
+)
+
+var errNoDefaultRoute = fmt.Errorf("could not find default route")
+var errNoDefaultHost = fmt.Errorf("could not find default host")
+var errNoDefaultInterface = fmt.Errorf("could not find default interface")
+
+// GetDefaultHost obtains the first IP address of the machine from the routing table and returns it as a string.
+// An IPv4 address is preferred to an IPv6 address for backward compatibility.
+func GetDefaultHost() (string, error) {
+ rmsgs, rerr := getDefaultRoutes()
+ if rerr != nil {
+ return "", rerr
+ }
+
+ // prioritize IPv4
+ if rmsg, ok := rmsgs[syscall.AF_INET]; ok {
+ if host, err := chooseHost(syscall.AF_INET, rmsg); host != "" || err != nil {
+ return host, err
+ }
+ delete(rmsgs, syscall.AF_INET)
+ }
+
+ // sort so choice is deterministic
+ var families []int
+ for family := range rmsgs {
+ families = append(families, int(family))
+ }
+ sort.Ints(families)
+
+ for _, f := range families {
+ family := uint8(f)
+ if host, err := chooseHost(family, rmsgs[family]); host != "" || err != nil {
+ return host, err
+ }
+ }
+
+ return "", errNoDefaultHost
+}
+
+func chooseHost(family uint8, rmsg *syscall.NetlinkMessage) (string, error) {
+ host, oif, err := parsePREFSRC(rmsg)
+ if host != "" || err != nil {
+ return host, err
+ }
+
+ // prefsrc not detected, fall back to getting address from iface
+ ifmsg, ierr := getIfaceAddr(oif, family)
+ if ierr != nil {
+ return "", ierr
+ }
+
+ attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
+ if aerr != nil {
+ return "", aerr
+ }
+
+ for _, attr := range attrs {
+ // search for RTA_DST because ipv6 doesn't have RTA_SRC
+ if attr.Attr.Type == syscall.RTA_DST {
+ return net.IP(attr.Value).String(), nil
+ }
+ }
+
+ return "", nil
+}
+
+func getDefaultRoutes() (map[uint8]*syscall.NetlinkMessage, error) {
+ dat, err := syscall.NetlinkRIB(syscall.RTM_GETROUTE, syscall.AF_UNSPEC)
+ if err != nil {
+ return nil, err
+ }
+
+ msgs, msgErr := syscall.ParseNetlinkMessage(dat)
+ if msgErr != nil {
+ return nil, msgErr
+ }
+
+ routes := make(map[uint8]*syscall.NetlinkMessage)
+ rtmsg := syscall.RtMsg{}
+ for _, m := range msgs {
+ if m.Header.Type != syscall.RTM_NEWROUTE {
+ continue
+ }
+ buf := bytes.NewBuffer(m.Data[:syscall.SizeofRtMsg])
+ if rerr := binary.Read(buf, cpuutil.ByteOrder(), &rtmsg); rerr != nil {
+ continue
+ }
+ if rtmsg.Dst_len == 0 && rtmsg.Table == syscall.RT_TABLE_MAIN {
+ // zero-length Dst_len implies default route
+ msg := m
+ routes[rtmsg.Family] = &msg
+ }
+ }
+
+ if len(routes) > 0 {
+ return routes, nil
+ }
+
+ return nil, errNoDefaultRoute
+}
+
+// Used to get an address of an interface.
+func getIfaceAddr(idx uint32, family uint8) (*syscall.NetlinkMessage, error) {
+ dat, err := syscall.NetlinkRIB(syscall.RTM_GETADDR, int(family))
+ if err != nil {
+ return nil, err
+ }
+
+ msgs, msgErr := syscall.ParseNetlinkMessage(dat)
+ if msgErr != nil {
+ return nil, msgErr
+ }
+
+ ifaddrmsg := syscall.IfAddrmsg{}
+ for _, m := range msgs {
+ if m.Header.Type != syscall.RTM_NEWADDR {
+ continue
+ }
+ buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfAddrmsg])
+ if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifaddrmsg); rerr != nil {
+ continue
+ }
+ if ifaddrmsg.Index == idx {
+ return &m, nil
+ }
+ }
+
+ return nil, fmt.Errorf("could not find address for interface index %v", idx)
+}
+
+// Used to get the name of an interface.
+func getIfaceLink(idx uint32) (*syscall.NetlinkMessage, error) {
+ dat, err := syscall.NetlinkRIB(syscall.RTM_GETLINK, syscall.AF_UNSPEC)
+ if err != nil {
+ return nil, err
+ }
+
+ msgs, msgErr := syscall.ParseNetlinkMessage(dat)
+ if msgErr != nil {
+ return nil, msgErr
+ }
+
+ ifinfomsg := syscall.IfInfomsg{}
+ for _, m := range msgs {
+ if m.Header.Type != syscall.RTM_NEWLINK {
+ continue
+ }
+ buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfInfomsg])
+ if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifinfomsg); rerr != nil {
+ continue
+ }
+ if ifinfomsg.Index == int32(idx) {
+ return &m, nil
+ }
+ }
+
+ return nil, fmt.Errorf("could not find link for interface index %v", idx)
+}
+
+// GetDefaultInterfaces gets names of interfaces and returns a map[interface]families.
+func GetDefaultInterfaces() (map[string]uint8, error) {
+ interfaces := make(map[string]uint8)
+ rmsgs, rerr := getDefaultRoutes()
+ if rerr != nil {
+ return interfaces, rerr
+ }
+
+ for family, rmsg := range rmsgs {
+ _, oif, err := parsePREFSRC(rmsg)
+ if err != nil {
+ return interfaces, err
+ }
+
+ ifmsg, ierr := getIfaceLink(oif)
+ if ierr != nil {
+ return interfaces, ierr
+ }
+
+ attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
+ if aerr != nil {
+ return interfaces, aerr
+ }
+
+ for _, attr := range attrs {
+ if attr.Attr.Type == syscall.IFLA_IFNAME {
+ // key is an interface name
+ // possible values: 2 - AF_INET, 10 - AF_INET6, 12 - dualstack
+ interfaces[string(attr.Value[:len(attr.Value)-1])] += family
+ }
+ }
+ }
+ if len(interfaces) > 0 {
+ return interfaces, nil
+ }
+ return interfaces, errNoDefaultInterface
+}
+
+// parsePREFSRC returns preferred source address and output interface index (RTA_OIF).
+func parsePREFSRC(m *syscall.NetlinkMessage) (host string, oif uint32, err error) {
+ var attrs []syscall.NetlinkRouteAttr
+ attrs, err = syscall.ParseNetlinkRouteAttr(m)
+ if err != nil {
+ return "", 0, err
+ }
+
+ for _, attr := range attrs {
+ if attr.Attr.Type == syscall.RTA_PREFSRC {
+ host = net.IP(attr.Value).String()
+ }
+ if attr.Attr.Type == syscall.RTA_OIF {
+ oif = cpuutil.ByteOrder().Uint32(attr.Value)
+ }
+ if host != "" && oif != uint32(0) {
+ break
+ }
+ }
+
+ if oif == 0 {
+ err = errNoDefaultRoute
+ }
+ return host, oif, err
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/pathutil/path.go b/vendor/go.etcd.io/etcd/pkg/pathutil/path.go
new file mode 100644
index 000000000000..f26254ba933e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/pathutil/path.go
@@ -0,0 +1,31 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pathutil implements utility functions for handling slash-separated
+// paths.
+package pathutil
+
+import "path"
+
+// CanonicalURLPath returns the canonical url path for p, which follows the rules:
+// 1. the path always starts with "/"
+// 2. replace multiple slashes with a single slash
+// 3. replace each '.' '..' path name element with equivalent one
+// 4. keep the trailing slash
+// The function is borrowed from stdlib http.cleanPath in server.go.
+func CanonicalURLPath(p string) string {
+ if p == "" {
+ return "/"
+ }
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ np := path.Clean(p)
+ // path.Clean removes trailing slash except for root,
+ // put the trailing slash back if necessary.
+ if p[len(p)-1] == '/' && np != "/" {
+ np += "/"
+ }
+ return np
+}
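
A quick illustration of the four rules above, as a standalone sketch (not part of the vendored file):

	package main

	import (
		"fmt"

		"go.etcd.io/etcd/pkg/pathutil"
	)

	func main() {
		for _, p := range []string{"", "a/b", "//a//b/", "/a/../b/"} {
			fmt.Printf("%q -> %q\n", p, pathutil.CanonicalURLPath(p))
		}
		// "" -> "/"            (rule 1)
		// "a/b" -> "/a/b"      (rule 1)
		// "//a//b/" -> "/a/b/" (rules 2 and 4)
		// "/a/../b/" -> "/b/"  (rules 3 and 4)
	}
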
diff --git a/vendor/go.etcd.io/etcd/pkg/pbutil/pbutil.go b/vendor/go.etcd.io/etcd/pkg/pbutil/pbutil.go
new file mode 100644
index 000000000000..53167ffa51ca
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/pbutil/pbutil.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil defines interfaces for handling Protocol Buffer objects.
+package pbutil
+
+import "github.com/coreos/pkg/capnslog"
+
+var (
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "pkg/pbutil")
+)
+
+type Marshaler interface {
+ Marshal() (data []byte, err error)
+}
+
+type Unmarshaler interface {
+ Unmarshal(data []byte) error
+}
+
+func MustMarshal(m Marshaler) []byte {
+ d, err := m.Marshal()
+ if err != nil {
+ plog.Panicf("marshal should never fail (%v)", err)
+ }
+ return d
+}
+
+func MustUnmarshal(um Unmarshaler, data []byte) {
+ if err := um.Unmarshal(data); err != nil {
+ plog.Panicf("unmarshal should never fail (%v)", err)
+ }
+}
+
+func MaybeUnmarshal(um Unmarshaler, data []byte) bool {
+ if err := um.Unmarshal(data); err != nil {
+ return false
+ }
+ return true
+}
+
+func GetBool(v *bool) (vv bool, set bool) {
+ if v == nil {
+ return false, false
+ }
+ return *v, true
+}
+
+func Boolp(b bool) *bool { return &b }
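
The Must* helpers are meant for protobufs that are expected to always round-trip, panicking instead of returning an error; the pointer helpers handle optional proto2-style bools. A small usage sketch:

	package main

	import (
		"fmt"

		"go.etcd.io/etcd/pkg/pbutil"
	)

	func main() {
		// A nil *bool reads back as (false, unset)...
		v, set := pbutil.GetBool(nil)
		fmt.Println(v, set) // false false

		// ...while Boolp builds a settable pointer from a literal.
		v, set = pbutil.GetBool(pbutil.Boolp(true))
		fmt.Println(v, set) // true true
	}
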
diff --git a/vendor/go.etcd.io/etcd/pkg/runtime/fds_linux.go b/vendor/go.etcd.io/etcd/pkg/runtime/fds_linux.go
new file mode 100644
index 000000000000..8e9359db28ca
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/runtime/fds_linux.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package runtime implements utility functions for runtime systems.
+package runtime
+
+import (
+ "io/ioutil"
+ "syscall"
+)
+
+func FDLimit() (uint64, error) {
+ var rlimit syscall.Rlimit
+ if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil {
+ return 0, err
+ }
+ return rlimit.Cur, nil
+}
+
+func FDUsage() (uint64, error) {
+ fds, err := ioutil.ReadDir("/proc/self/fd")
+ if err != nil {
+ return 0, err
+ }
+ return uint64(len(fds)), nil
+}
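
Typical use is a health check comparing descriptor usage against the process limit; a minimal sketch (FDUsage requires Linux, given the /proc dependency above):

	package main

	import (
		"fmt"

		"go.etcd.io/etcd/pkg/runtime"
	)

	func main() {
		limit, err := runtime.FDLimit()
		if err != nil {
			panic(err)
		}
		used, err := runtime.FDUsage()
		if err != nil {
			panic(err)
		}
		fmt.Printf("file descriptors: %d used of %d allowed\n", used, limit)
	}
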
diff --git a/vendor/go.etcd.io/etcd/pkg/runtime/fds_other.go b/vendor/go.etcd.io/etcd/pkg/runtime/fds_other.go
new file mode 100644
index 000000000000..0cbdb88c7a60
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/runtime/fds_other.go
@@ -0,0 +1,30 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !linux
+
+package runtime
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func FDLimit() (uint64, error) {
+ return 0, fmt.Errorf("cannot get FDLimit on %s", runtime.GOOS)
+}
+
+func FDUsage() (uint64, error) {
+ return 0, fmt.Errorf("cannot get FDUsage on %s", runtime.GOOS)
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/schedule/doc.go b/vendor/go.etcd.io/etcd/pkg/schedule/doc.go
new file mode 100644
index 000000000000..cca2c75fb6a9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/schedule/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package schedule provides mechanisms and policies for scheduling units of work.
+package schedule
diff --git a/vendor/go.etcd.io/etcd/pkg/schedule/schedule.go b/vendor/go.etcd.io/etcd/pkg/schedule/schedule.go
new file mode 100644
index 000000000000..234d01989dfb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/schedule/schedule.go
@@ -0,0 +1,165 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schedule
+
+import (
+ "context"
+ "sync"
+)
+
+type Job func(context.Context)
+
+// Scheduler can schedule jobs.
+type Scheduler interface {
+ // Schedule asks the scheduler to schedule a job defined by the given func.
+ // Schedule to a stopped scheduler might panic.
+ Schedule(j Job)
+
+ // Pending returns number of pending jobs
+ Pending() int
+
+ // Scheduled returns the number of scheduled jobs (excluding pending jobs)
+ Scheduled() int
+
+ // Finished returns the number of finished jobs
+ Finished() int
+
+	// WaitFinish waits until at least n jobs are finished and all pending jobs are finished.
+ WaitFinish(n int)
+
+ // Stop stops the scheduler.
+ Stop()
+}
+
+type fifo struct {
+ mu sync.Mutex
+
+ resume chan struct{}
+ scheduled int
+ finished int
+ pendings []Job
+
+ ctx context.Context
+ cancel context.CancelFunc
+
+ finishCond *sync.Cond
+ donec chan struct{}
+}
+
+// NewFIFOScheduler returns a Scheduler that schedules jobs in FIFO
+// order sequentially
+func NewFIFOScheduler() Scheduler {
+ f := &fifo{
+ resume: make(chan struct{}, 1),
+ donec: make(chan struct{}, 1),
+ }
+ f.finishCond = sync.NewCond(&f.mu)
+ f.ctx, f.cancel = context.WithCancel(context.Background())
+ go f.run()
+ return f
+}
+
+// Schedule schedules a job that will be run in FIFO order sequentially.
+func (f *fifo) Schedule(j Job) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ if f.cancel == nil {
+ panic("schedule: schedule to stopped scheduler")
+ }
+
+ if len(f.pendings) == 0 {
+ select {
+ case f.resume <- struct{}{}:
+ default:
+ }
+ }
+ f.pendings = append(f.pendings, j)
+}
+
+func (f *fifo) Pending() int {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ return len(f.pendings)
+}
+
+func (f *fifo) Scheduled() int {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ return f.scheduled
+}
+
+func (f *fifo) Finished() int {
+ f.finishCond.L.Lock()
+ defer f.finishCond.L.Unlock()
+ return f.finished
+}
+
+func (f *fifo) WaitFinish(n int) {
+ f.finishCond.L.Lock()
+ for f.finished < n || len(f.pendings) != 0 {
+ f.finishCond.Wait()
+ }
+ f.finishCond.L.Unlock()
+}
+
+// Stop stops the scheduler and cancels all pending jobs.
+func (f *fifo) Stop() {
+ f.mu.Lock()
+ f.cancel()
+ f.cancel = nil
+ f.mu.Unlock()
+ <-f.donec
+}
+
+func (f *fifo) run() {
+ // TODO: recover from job panic?
+ defer func() {
+ close(f.donec)
+ close(f.resume)
+ }()
+
+ for {
+ var todo Job
+ f.mu.Lock()
+ if len(f.pendings) != 0 {
+ f.scheduled++
+ todo = f.pendings[0]
+ }
+ f.mu.Unlock()
+ if todo == nil {
+ select {
+ case <-f.resume:
+ case <-f.ctx.Done():
+ f.mu.Lock()
+ pendings := f.pendings
+ f.pendings = nil
+ f.mu.Unlock()
+ // clean up pending jobs
+ for _, todo := range pendings {
+ todo(f.ctx)
+ }
+ return
+ }
+ } else {
+ todo(f.ctx)
+ f.finishCond.L.Lock()
+ f.finished++
+ f.pendings = f.pendings[1:]
+ f.finishCond.Broadcast()
+ f.finishCond.L.Unlock()
+ }
+ }
+}
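
A minimal sketch of driving the FIFO scheduler: jobs run sequentially in submission order, and WaitFinish returns once at least three jobs, plus anything still pending, have completed:

	package main

	import (
		"context"
		"fmt"

		"go.etcd.io/etcd/pkg/schedule"
	)

	func main() {
		s := schedule.NewFIFOScheduler()
		defer s.Stop()

		for i := 0; i < 3; i++ {
			i := i // capture the per-iteration value
			s.Schedule(func(ctx context.Context) {
				fmt.Println("job", i)
			})
		}
		s.WaitFinish(3) // prints job 0, job 1, job 2
	}
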
diff --git a/vendor/go.etcd.io/etcd/pkg/srv/srv.go b/vendor/go.etcd.io/etcd/pkg/srv/srv.go
new file mode 100644
index 000000000000..c3560026d481
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/srv/srv.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package srv looks up DNS SRV records.
+package srv
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "strings"
+
+ "go.etcd.io/etcd/pkg/types"
+)
+
+var (
+ // indirection for testing
+ lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict
+ resolveTCPAddr = net.ResolveTCPAddr
+)
+
+// GetCluster gets the cluster information via DNS discovery.
+// Each DNS SRV entry is treated as a separate instance.
+func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]string, error) {
+	tempName := 0
+ tcp2ap := make(map[string]url.URL)
+
+ // First, resolve the apurls
+ for _, url := range apurls {
+ tcpAddr, err := resolveTCPAddr("tcp", url.Host)
+ if err != nil {
+ return nil, err
+ }
+ tcp2ap[tcpAddr.String()] = url
+ }
+
+ stringParts := []string{}
+ updateNodeMap := func(service, scheme string) error {
+ _, addrs, err := lookupSRV(service, "tcp", dns)
+ if err != nil {
+ return err
+ }
+ for _, srv := range addrs {
+ port := fmt.Sprintf("%d", srv.Port)
+ host := net.JoinHostPort(srv.Target, port)
+ tcpAddr, terr := resolveTCPAddr("tcp", host)
+ if terr != nil {
+ err = terr
+ continue
+ }
+ n := ""
+ url, ok := tcp2ap[tcpAddr.String()]
+ if ok {
+ n = name
+ }
+ if n == "" {
+ n = fmt.Sprintf("%d", tempName)
+ tempName++
+ }
+			// SRV records have a trailing dot but URLs shouldn't.
+ shortHost := strings.TrimSuffix(srv.Target, ".")
+ urlHost := net.JoinHostPort(shortHost, port)
+ if ok && url.Scheme != scheme {
+ err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
+ } else {
+ stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
+ }
+ }
+ if len(stringParts) == 0 {
+ return err
+ }
+ return nil
+ }
+
+ err := updateNodeMap(service, serviceScheme)
+ if err != nil {
+		return nil, fmt.Errorf("error querying DNS SRV records for _%s: %s", service, err)
+ }
+ return stringParts, nil
+}
+
+type SRVClients struct {
+ Endpoints []string
+ SRVs []*net.SRV
+}
+
+// GetClient looks up the client endpoints for a service, domain, and optional service name.
+func GetClient(service, domain string, serviceName string) (*SRVClients, error) {
+ var urls []*url.URL
+ var srvs []*net.SRV
+
+ updateURLs := func(service, scheme string) error {
+ _, addrs, err := lookupSRV(service, "tcp", domain)
+ if err != nil {
+ return err
+ }
+ for _, srv := range addrs {
+ urls = append(urls, &url.URL{
+ Scheme: scheme,
+ Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
+ })
+ }
+ srvs = append(srvs, addrs...)
+ return nil
+ }
+
+ errHTTPS := updateURLs(GetSRVService(service, serviceName, "https"), "https")
+ errHTTP := updateURLs(GetSRVService(service, serviceName, "http"), "http")
+
+ if errHTTPS != nil && errHTTP != nil {
+ return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
+ }
+
+ endpoints := make([]string, len(urls))
+ for i := range urls {
+ endpoints[i] = urls[i].String()
+ }
+ return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil
+}
+
+// GetSRVService generates an SRV service name, including the optional serviceName suffix.
+func GetSRVService(service, serviceName string, scheme string) (SRVService string) {
+ if scheme == "https" {
+ service = fmt.Sprintf("%s-ssl", service)
+ }
+
+ if serviceName != "" {
+ return fmt.Sprintf("%s-%s", service, serviceName)
+ }
+ return service
+}
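
GetSRVService is pure string assembly, so its behavior is easy to pin down; lookupSRV then resolves the resulting name as _<service>._tcp.<domain>. A sketch (the "k3s" suffix is just an example value):

	package main

	import (
		"fmt"

		"go.etcd.io/etcd/pkg/srv"
	)

	func main() {
		fmt.Println(srv.GetSRVService("etcd-server", "", "https"))   // etcd-server-ssl
		fmt.Println(srv.GetSRVService("etcd-server", "", "http"))    // etcd-server
		fmt.Println(srv.GetSRVService("etcd-server", "k3s", "http")) // etcd-server-k3s
	}
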
diff --git a/vendor/go.etcd.io/etcd/pkg/traceutil/trace.go b/vendor/go.etcd.io/etcd/pkg/traceutil/trace.go
new file mode 100644
index 000000000000..2d247dd9accb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/traceutil/trace.go
@@ -0,0 +1,172 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package traceutil implements tracing utilities using "context".
+package traceutil
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "math/rand"
+ "time"
+
+ "go.uber.org/zap"
+)
+
+const (
+ TraceKey = "trace"
+ StartTimeKey = "startTime"
+)
+
+// Field is a kv pair to record additional details of the trace.
+type Field struct {
+ Key string
+ Value interface{}
+}
+
+func (f *Field) format() string {
+ return fmt.Sprintf("%s:%v; ", f.Key, f.Value)
+}
+
+func writeFields(fields []Field) string {
+ if len(fields) == 0 {
+ return ""
+ }
+ var buf bytes.Buffer
+ buf.WriteString("{")
+ for _, f := range fields {
+ buf.WriteString(f.format())
+ }
+ buf.WriteString("}")
+ return buf.String()
+}
+
+type Trace struct {
+ operation string
+ lg *zap.Logger
+ fields []Field
+ startTime time.Time
+ steps []step
+ stepDisabled bool
+}
+
+type step struct {
+ time time.Time
+ msg string
+ fields []Field
+}
+
+func New(op string, lg *zap.Logger, fields ...Field) *Trace {
+ return &Trace{operation: op, lg: lg, startTime: time.Now(), fields: fields}
+}
+
+// TODO returns a non-nil, empty Trace
+func TODO() *Trace {
+ return &Trace{}
+}
+
+func Get(ctx context.Context) *Trace {
+ if trace, ok := ctx.Value(TraceKey).(*Trace); ok && trace != nil {
+ return trace
+ }
+ return TODO()
+}
+
+func (t *Trace) GetStartTime() time.Time {
+ return t.startTime
+}
+
+func (t *Trace) SetStartTime(time time.Time) {
+ t.startTime = time
+}
+
+func (t *Trace) InsertStep(at int, time time.Time, msg string, fields ...Field) {
+ newStep := step{time, msg, fields}
+ if at < len(t.steps) {
+ t.steps = append(t.steps[:at+1], t.steps[at:]...)
+ t.steps[at] = newStep
+ } else {
+ t.steps = append(t.steps, newStep)
+ }
+}
+
+// Step adds step to trace
+func (t *Trace) Step(msg string, fields ...Field) {
+ if !t.stepDisabled {
+ t.steps = append(t.steps, step{time: time.Now(), msg: msg, fields: fields})
+ }
+}
+
+// DisableStep sets the flag to prevent the trace from adding steps
+func (t *Trace) DisableStep() {
+ t.stepDisabled = true
+}
+
+// EnableStep re-enables the trace to add steps
+func (t *Trace) EnableStep() {
+ t.stepDisabled = false
+}
+
+func (t *Trace) AddField(fields ...Field) {
+ for _, f := range fields {
+ t.fields = append(t.fields, f)
+ }
+}
+
+// Log dumps all steps in the Trace
+func (t *Trace) Log() {
+ t.LogWithStepThreshold(0)
+}
+
+// LogIfLong dumps logs if the duration is longer than threshold
+func (t *Trace) LogIfLong(threshold time.Duration) {
+ if time.Since(t.startTime) > threshold {
+ stepThreshold := threshold / time.Duration(len(t.steps)+1)
+ t.LogWithStepThreshold(stepThreshold)
+ }
+}
+
+// LogWithStepThreshold only dumps steps whose duration is longer than the step threshold
+func (t *Trace) LogWithStepThreshold(threshold time.Duration) {
+ msg, fs := t.logInfo(threshold)
+ if t.lg != nil {
+ t.lg.Info(msg, fs...)
+ }
+}
+
+func (t *Trace) logInfo(threshold time.Duration) (string, []zap.Field) {
+ endTime := time.Now()
+ totalDuration := endTime.Sub(t.startTime)
+ traceNum := rand.Int31()
+ msg := fmt.Sprintf("trace[%d] %s", traceNum, t.operation)
+
+ var steps []string
+ lastStepTime := t.startTime
+ for _, step := range t.steps {
+ stepDuration := step.time.Sub(lastStepTime)
+ if stepDuration > threshold {
+ steps = append(steps, fmt.Sprintf("trace[%d] '%v' %s (duration: %v)",
+ traceNum, step.msg, writeFields(step.fields), stepDuration))
+ }
+ lastStepTime = step.time
+ }
+
+ fs := []zap.Field{zap.String("detail", writeFields(t.fields)),
+ zap.Duration("duration", totalDuration),
+ zap.Time("start", t.startTime),
+ zap.Time("end", endTime),
+ zap.Strings("steps", steps)}
+ return msg, fs
+}
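
A sketch of the intended call pattern (operation and step names here are illustrative): build a Trace when a request starts, mark steps as it progresses, and pay the logging cost only when the request was slow:

	package main

	import (
		"time"

		"go.etcd.io/etcd/pkg/traceutil"
		"go.uber.org/zap"
	)

	func main() {
		lg, _ := zap.NewDevelopment()
		trace := traceutil.New("range", lg,
			traceutil.Field{Key: "key", Value: "/registry/pods"})

		time.Sleep(2 * time.Millisecond)
		trace.Step("agreement among raft nodes before linearized reading")
		time.Sleep(2 * time.Millisecond)
		trace.Step("range keys from in-memory index tree")

		// Logged because the total duration exceeds the threshold;
		// each step is filtered by threshold/(len(steps)+1).
		trace.LogIfLong(time.Millisecond)
	}
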
diff --git a/vendor/go.etcd.io/etcd/pkg/wait/wait.go b/vendor/go.etcd.io/etcd/pkg/wait/wait.go
new file mode 100644
index 000000000000..9b1df419e292
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/wait/wait.go
@@ -0,0 +1,91 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package wait provides utility functions for polling and listening using Go
+// channels.
+package wait
+
+import (
+ "log"
+ "sync"
+)
+
+// Wait is an interface that provides the ability to wait and trigger events that
+// are associated with IDs.
+type Wait interface {
+	// Register returns a chan that waits on the given ID.
+ // The chan will be triggered when Trigger is called with
+ // the same ID.
+ Register(id uint64) <-chan interface{}
+ // Trigger triggers the waiting chans with the given ID.
+ Trigger(id uint64, x interface{})
+ IsRegistered(id uint64) bool
+}
+
+type list struct {
+ l sync.RWMutex
+ m map[uint64]chan interface{}
+}
+
+// New creates a Wait.
+func New() Wait {
+ return &list{m: make(map[uint64]chan interface{})}
+}
+
+func (w *list) Register(id uint64) <-chan interface{} {
+ w.l.Lock()
+ defer w.l.Unlock()
+ ch := w.m[id]
+ if ch == nil {
+ ch = make(chan interface{}, 1)
+ w.m[id] = ch
+ } else {
+ log.Panicf("dup id %x", id)
+ }
+ return ch
+}
+
+func (w *list) Trigger(id uint64, x interface{}) {
+ w.l.Lock()
+ ch := w.m[id]
+ delete(w.m, id)
+ w.l.Unlock()
+ if ch != nil {
+ ch <- x
+ close(ch)
+ }
+}
+
+func (w *list) IsRegistered(id uint64) bool {
+ w.l.RLock()
+ defer w.l.RUnlock()
+ _, ok := w.m[id]
+ return ok
+}
+
+type waitWithResponse struct {
+ ch <-chan interface{}
+}
+
+func NewWithResponse(ch <-chan interface{}) Wait {
+ return &waitWithResponse{ch: ch}
+}
+
+func (w *waitWithResponse) Register(id uint64) <-chan interface{} {
+ return w.ch
+}
+func (w *waitWithResponse) Trigger(id uint64, x interface{}) {}
+func (w *waitWithResponse) IsRegistered(id uint64) bool {
+ panic("waitWithResponse.IsRegistered() shouldn't be called")
+}
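
Register/Trigger is a request/response matching primitive: a caller registers a unique ID before issuing a request, and whoever produces the result triggers that ID exactly once. A sketch:

	package main

	import (
		"fmt"

		"go.etcd.io/etcd/pkg/wait"
	)

	func main() {
		w := wait.New()

		ch := w.Register(42) // registering the same ID twice panics

		go w.Trigger(42, "result for request 42")

		fmt.Println(<-ch)               // result for request 42
		fmt.Println(w.IsRegistered(42)) // false: Trigger removed the ID
	}
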
diff --git a/vendor/go.etcd.io/etcd/pkg/wait/wait_time.go b/vendor/go.etcd.io/etcd/pkg/wait/wait_time.go
new file mode 100644
index 000000000000..297e48a47d76
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/wait/wait_time.go
@@ -0,0 +1,66 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wait
+
+import "sync"
+
+type WaitTime interface {
+ // Wait returns a chan that waits on the given logical deadline.
+	// The chan will be triggered when Trigger is called with a
+	// deadline at or after the one it is waiting for.
+ Wait(deadline uint64) <-chan struct{}
+	// Trigger triggers all the waiting chans with a deadline at or before the given one.
+ Trigger(deadline uint64)
+}
+
+var closec chan struct{}
+
+func init() { closec = make(chan struct{}); close(closec) }
+
+type timeList struct {
+ l sync.Mutex
+ lastTriggerDeadline uint64
+ m map[uint64]chan struct{}
+}
+
+func NewTimeList() *timeList {
+ return &timeList{m: make(map[uint64]chan struct{})}
+}
+
+func (tl *timeList) Wait(deadline uint64) <-chan struct{} {
+ tl.l.Lock()
+ defer tl.l.Unlock()
+ if tl.lastTriggerDeadline >= deadline {
+ return closec
+ }
+ ch := tl.m[deadline]
+ if ch == nil {
+ ch = make(chan struct{})
+ tl.m[deadline] = ch
+ }
+ return ch
+}
+
+func (tl *timeList) Trigger(deadline uint64) {
+ tl.l.Lock()
+ defer tl.l.Unlock()
+ tl.lastTriggerDeadline = deadline
+ for t, ch := range tl.m {
+ if t <= deadline {
+ delete(tl.m, t)
+ close(ch)
+ }
+ }
+}
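
WaitTime is the deadline-keyed variant: a Trigger releases every waiter whose logical deadline is at or before the triggered one, and a Wait on a deadline already passed returns a closed channel immediately. A sketch:

	package main

	import (
		"fmt"

		"go.etcd.io/etcd/pkg/wait"
	)

	func main() {
		tl := wait.NewTimeList()

		ch := tl.Wait(5)
		tl.Trigger(4) // 5 > 4: ch stays open
		tl.Trigger(5) // 5 <= 5: ch is closed
		<-ch
		fmt.Println("released at deadline 5")

		<-tl.Wait(3) // 3 <= last trigger (5): already closed
		fmt.Println("immediately done")
	}
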
diff --git a/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go
new file mode 100644
index 000000000000..59dbe6b0e88e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go
@@ -0,0 +1,93 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+ grpc "google.golang.org/grpc"
+)
+
+type as2ac struct{ as pb.AuthServer }
+
+func AuthServerToAuthClient(as pb.AuthServer) pb.AuthClient {
+ return &as2ac{as}
+}
+
+func (s *as2ac) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (*pb.AuthEnableResponse, error) {
+ return s.as.AuthEnable(ctx, in)
+}
+
+func (s *as2ac) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (*pb.AuthDisableResponse, error) {
+ return s.as.AuthDisable(ctx, in)
+}
+
+func (s *as2ac) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (*pb.AuthenticateResponse, error) {
+ return s.as.Authenticate(ctx, in)
+}
+
+func (s *as2ac) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (*pb.AuthRoleAddResponse, error) {
+ return s.as.RoleAdd(ctx, in)
+}
+
+func (s *as2ac) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (*pb.AuthRoleDeleteResponse, error) {
+ return s.as.RoleDelete(ctx, in)
+}
+
+func (s *as2ac) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (*pb.AuthRoleGetResponse, error) {
+ return s.as.RoleGet(ctx, in)
+}
+
+func (s *as2ac) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (*pb.AuthRoleListResponse, error) {
+ return s.as.RoleList(ctx, in)
+}
+
+func (s *as2ac) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleRevokePermissionResponse, error) {
+ return s.as.RoleRevokePermission(ctx, in)
+}
+
+func (s *as2ac) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleGrantPermissionResponse, error) {
+ return s.as.RoleGrantPermission(ctx, in)
+}
+
+func (s *as2ac) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (*pb.AuthUserDeleteResponse, error) {
+ return s.as.UserDelete(ctx, in)
+}
+
+func (s *as2ac) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (*pb.AuthUserAddResponse, error) {
+ return s.as.UserAdd(ctx, in)
+}
+
+func (s *as2ac) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (*pb.AuthUserGetResponse, error) {
+ return s.as.UserGet(ctx, in)
+}
+
+func (s *as2ac) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (*pb.AuthUserListResponse, error) {
+ return s.as.UserList(ctx, in)
+}
+
+func (s *as2ac) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserGrantRoleResponse, error) {
+ return s.as.UserGrantRole(ctx, in)
+}
+
+func (s *as2ac) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserRevokeRoleResponse, error) {
+ return s.as.UserRevokeRole(ctx, in)
+}
+
+func (s *as2ac) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*pb.AuthUserChangePasswordResponse, error) {
+ return s.as.UserChangePassword(ctx, in)
+}
diff --git a/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/chan_stream.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/chan_stream.go
new file mode 100644
index 000000000000..1af514b1fdd3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/chan_stream.go
@@ -0,0 +1,167 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// chanServerStream implements grpc.ServerStream with a chanStream
+type chanServerStream struct {
+ headerc chan<- metadata.MD
+ trailerc chan<- metadata.MD
+ grpc.Stream
+
+ headers []metadata.MD
+}
+
+func (ss *chanServerStream) SendHeader(md metadata.MD) error {
+ if ss.headerc == nil {
+ return errAlreadySentHeader
+ }
+ outmd := make(map[string][]string)
+ for _, h := range append(ss.headers, md) {
+ for k, v := range h {
+ outmd[k] = v
+ }
+ }
+ select {
+ case ss.headerc <- outmd:
+ ss.headerc = nil
+ ss.headers = nil
+ return nil
+ case <-ss.Context().Done():
+ }
+ return ss.Context().Err()
+}
+
+func (ss *chanServerStream) SetHeader(md metadata.MD) error {
+ if ss.headerc == nil {
+ return errAlreadySentHeader
+ }
+ ss.headers = append(ss.headers, md)
+ return nil
+}
+
+func (ss *chanServerStream) SetTrailer(md metadata.MD) {
+ ss.trailerc <- md
+}
+
+// chanClientStream implements grpc.ClientStream with a chanStream
+type chanClientStream struct {
+ headerc <-chan metadata.MD
+ trailerc <-chan metadata.MD
+ *chanStream
+}
+
+func (cs *chanClientStream) Header() (metadata.MD, error) {
+ select {
+ case md := <-cs.headerc:
+ return md, nil
+ case <-cs.Context().Done():
+ }
+ return nil, cs.Context().Err()
+}
+
+func (cs *chanClientStream) Trailer() metadata.MD {
+ select {
+ case md := <-cs.trailerc:
+ return md
+ case <-cs.Context().Done():
+ return nil
+ }
+}
+
+func (cs *chanClientStream) CloseSend() error {
+ close(cs.chanStream.sendc)
+ return nil
+}
+
+// chanStream implements grpc.Stream using channels
+type chanStream struct {
+ recvc <-chan interface{}
+ sendc chan<- interface{}
+ ctx context.Context
+ cancel context.CancelFunc
+}
+
+func (s *chanStream) Context() context.Context { return s.ctx }
+
+func (s *chanStream) SendMsg(m interface{}) error {
+ select {
+ case s.sendc <- m:
+ if err, ok := m.(error); ok {
+ return err
+ }
+ return nil
+ case <-s.ctx.Done():
+ }
+ return s.ctx.Err()
+}
+
+func (s *chanStream) RecvMsg(m interface{}) error {
+ v := m.(*interface{})
+ for {
+ select {
+ case msg, ok := <-s.recvc:
+ if !ok {
+ return status.Error(codes.Canceled, "the client connection is closing")
+ }
+ if err, ok := msg.(error); ok {
+ return err
+ }
+ *v = msg
+ return nil
+ case <-s.ctx.Done():
+ }
+ if len(s.recvc) == 0 {
+ // prioritize any pending recv messages over canceled context
+ break
+ }
+ }
+ return s.ctx.Err()
+}
+
+func newPipeStream(ctx context.Context, ssHandler func(chanServerStream) error) chanClientStream {
+ // ch1 is buffered so server can send error on close
+ ch1, ch2 := make(chan interface{}, 1), make(chan interface{})
+ headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1)
+
+ cctx, ccancel := context.WithCancel(ctx)
+ cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel}
+ cs := chanClientStream{headerc, trailerc, cli}
+
+ sctx, scancel := context.WithCancel(ctx)
+ srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel}
+ ss := chanServerStream{headerc, trailerc, srv, nil}
+
+ go func() {
+ if err := ssHandler(ss); err != nil {
+ select {
+ case srv.sendc <- err:
+ case <-sctx.Done():
+ case <-cctx.Done():
+ }
+ }
+ scancel()
+ ccancel()
+ }()
+ return cs
+}
diff --git a/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go
new file mode 100644
index 000000000000..73a6fdfcba53
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go
@@ -0,0 +1,49 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+ "google.golang.org/grpc"
+)
+
+type cls2clc struct{ cls pb.ClusterServer }
+
+func ClusterServerToClusterClient(cls pb.ClusterServer) pb.ClusterClient {
+ return &cls2clc{cls}
+}
+
+func (s *cls2clc) MemberList(ctx context.Context, r *pb.MemberListRequest, opts ...grpc.CallOption) (*pb.MemberListResponse, error) {
+ return s.cls.MemberList(ctx, r)
+}
+
+func (s *cls2clc) MemberAdd(ctx context.Context, r *pb.MemberAddRequest, opts ...grpc.CallOption) (*pb.MemberAddResponse, error) {
+ return s.cls.MemberAdd(ctx, r)
+}
+
+func (s *cls2clc) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest, opts ...grpc.CallOption) (*pb.MemberUpdateResponse, error) {
+ return s.cls.MemberUpdate(ctx, r)
+}
+
+func (s *cls2clc) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest, opts ...grpc.CallOption) (*pb.MemberRemoveResponse, error) {
+ return s.cls.MemberRemove(ctx, r)
+}
+
+func (s *cls2clc) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest, opts ...grpc.CallOption) (*pb.MemberPromoteResponse, error) {
+ return s.cls.MemberPromote(ctx, r)
+}
diff --git a/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/doc.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/doc.go
new file mode 100644
index 000000000000..7170be233046
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package adapter provides gRPC adapters between client and server
+// gRPC interfaces without needing to go through a gRPC connection.
+package adapter
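
A sketch of the idea using the KV adapter defined later in this package: wrap whatever pb.KVServer implementation you already hold so client-side code can call it in-process, with no gRPC connection in between:

	package demo

	import (
		"context"
		"fmt"

		pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
		"go.etcd.io/etcd/proxy/grpcproxy/adapter"
	)

	func rangeFoo(kvs pb.KVServer) error {
		// Calls on kvc dispatch straight to kvs; no network hop.
		kvc := adapter.KvServerToKvClient(kvs)
		resp, err := kvc.Range(context.Background(), &pb.RangeRequest{Key: []byte("foo")})
		if err != nil {
			return err
		}
		fmt.Println("keys matching foo:", resp.Count)
		return nil
	}
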
diff --git a/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/election_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/election_client_adapter.go
new file mode 100644
index 000000000000..4722be040395
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/election_client_adapter.go
@@ -0,0 +1,80 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ "go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb"
+
+ "google.golang.org/grpc"
+)
+
+type es2ec struct{ es v3electionpb.ElectionServer }
+
+func ElectionServerToElectionClient(es v3electionpb.ElectionServer) v3electionpb.ElectionClient {
+ return &es2ec{es}
+}
+
+func (s *es2ec) Campaign(ctx context.Context, r *v3electionpb.CampaignRequest, opts ...grpc.CallOption) (*v3electionpb.CampaignResponse, error) {
+ return s.es.Campaign(ctx, r)
+}
+
+func (s *es2ec) Proclaim(ctx context.Context, r *v3electionpb.ProclaimRequest, opts ...grpc.CallOption) (*v3electionpb.ProclaimResponse, error) {
+ return s.es.Proclaim(ctx, r)
+}
+
+func (s *es2ec) Leader(ctx context.Context, r *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (*v3electionpb.LeaderResponse, error) {
+ return s.es.Leader(ctx, r)
+}
+
+func (s *es2ec) Resign(ctx context.Context, r *v3electionpb.ResignRequest, opts ...grpc.CallOption) (*v3electionpb.ResignResponse, error) {
+ return s.es.Resign(ctx, r)
+}
+
+func (s *es2ec) Observe(ctx context.Context, in *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (v3electionpb.Election_ObserveClient, error) {
+ cs := newPipeStream(ctx, func(ss chanServerStream) error {
+ return s.es.Observe(in, &es2ecServerStream{ss})
+ })
+ return &es2ecClientStream{cs}, nil
+}
+
+// es2ecClientStream implements Election_ObserveClient
+type es2ecClientStream struct{ chanClientStream }
+
+// es2ecServerStream implements Election_ObserveServer
+type es2ecServerStream struct{ chanServerStream }
+
+func (s *es2ecClientStream) Send(rr *v3electionpb.LeaderRequest) error {
+ return s.SendMsg(rr)
+}
+func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) {
+ var v interface{}
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*v3electionpb.LeaderResponse), nil
+}
+
+func (s *es2ecServerStream) Send(rr *v3electionpb.LeaderResponse) error {
+ return s.SendMsg(rr)
+}
+func (s *es2ecServerStream) Recv() (*v3electionpb.LeaderRequest, error) {
+ var v interface{}
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*v3electionpb.LeaderRequest), nil
+}
diff --git a/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go
new file mode 100644
index 000000000000..b1a782099477
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+ grpc "google.golang.org/grpc"
+)
+
+type kvs2kvc struct{ kvs pb.KVServer }
+
+func KvServerToKvClient(kvs pb.KVServer) pb.KVClient {
+ return &kvs2kvc{kvs}
+}
+
+func (s *kvs2kvc) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (*pb.RangeResponse, error) {
+ return s.kvs.Range(ctx, in)
+}
+
+func (s *kvs2kvc) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (*pb.PutResponse, error) {
+ return s.kvs.Put(ctx, in)
+}
+
+func (s *kvs2kvc) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (*pb.DeleteRangeResponse, error) {
+ return s.kvs.DeleteRange(ctx, in)
+}
+
+func (s *kvs2kvc) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (*pb.TxnResponse, error) {
+ return s.kvs.Txn(ctx, in)
+}
+
+func (s *kvs2kvc) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (*pb.CompactionResponse, error) {
+ return s.kvs.Compact(ctx, in)
+}
diff --git a/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go
new file mode 100644
index 000000000000..a58408f9f291
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go
@@ -0,0 +1,82 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+ "google.golang.org/grpc"
+)
+
+type ls2lc struct {
+ leaseServer pb.LeaseServer
+}
+
+func LeaseServerToLeaseClient(ls pb.LeaseServer) pb.LeaseClient {
+ return &ls2lc{ls}
+}
+
+func (c *ls2lc) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (*pb.LeaseGrantResponse, error) {
+ return c.leaseServer.LeaseGrant(ctx, in)
+}
+
+func (c *ls2lc) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (*pb.LeaseRevokeResponse, error) {
+ return c.leaseServer.LeaseRevoke(ctx, in)
+}
+
+func (c *ls2lc) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (pb.Lease_LeaseKeepAliveClient, error) {
+ cs := newPipeStream(ctx, func(ss chanServerStream) error {
+ return c.leaseServer.LeaseKeepAlive(&ls2lcServerStream{ss})
+ })
+ return &ls2lcClientStream{cs}, nil
+}
+
+func (c *ls2lc) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*pb.LeaseTimeToLiveResponse, error) {
+ return c.leaseServer.LeaseTimeToLive(ctx, in)
+}
+
+func (c *ls2lc) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (*pb.LeaseLeasesResponse, error) {
+ return c.leaseServer.LeaseLeases(ctx, in)
+}
+
+// ls2lcClientStream implements Lease_LeaseKeepAliveClient
+type ls2lcClientStream struct{ chanClientStream }
+
+// ls2lcServerStream implements Lease_LeaseKeepAliveServer
+type ls2lcServerStream struct{ chanServerStream }
+
+func (s *ls2lcClientStream) Send(rr *pb.LeaseKeepAliveRequest) error {
+ return s.SendMsg(rr)
+}
+func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) {
+ var v interface{}
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*pb.LeaseKeepAliveResponse), nil
+}
+
+func (s *ls2lcServerStream) Send(rr *pb.LeaseKeepAliveResponse) error {
+ return s.SendMsg(rr)
+}
+func (s *ls2lcServerStream) Recv() (*pb.LeaseKeepAliveRequest, error) {
+ var v interface{}
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*pb.LeaseKeepAliveRequest), nil
+}
diff --git a/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go
new file mode 100644
index 000000000000..65b5641d34f3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go
@@ -0,0 +1,37 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ "go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb"
+
+ "google.golang.org/grpc"
+)
+
+type ls2lsc struct{ ls v3lockpb.LockServer }
+
+func LockServerToLockClient(ls v3lockpb.LockServer) v3lockpb.LockClient {
+ return &ls2lsc{ls}
+}
+
+func (s *ls2lsc) Lock(ctx context.Context, r *v3lockpb.LockRequest, opts ...grpc.CallOption) (*v3lockpb.LockResponse, error) {
+ return s.ls.Lock(ctx, r)
+}
+
+func (s *ls2lsc) Unlock(ctx context.Context, r *v3lockpb.UnlockRequest, opts ...grpc.CallOption) (*v3lockpb.UnlockResponse, error) {
+ return s.ls.Unlock(ctx, r)
+}
diff --git a/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go
new file mode 100644
index 000000000000..4a8781b13adf
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go
@@ -0,0 +1,88 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+ "google.golang.org/grpc"
+)
+
+type mts2mtc struct{ mts pb.MaintenanceServer }
+
+func MaintenanceServerToMaintenanceClient(mts pb.MaintenanceServer) pb.MaintenanceClient {
+ return &mts2mtc{mts}
+}
+
+func (s *mts2mtc) Alarm(ctx context.Context, r *pb.AlarmRequest, opts ...grpc.CallOption) (*pb.AlarmResponse, error) {
+ return s.mts.Alarm(ctx, r)
+}
+
+func (s *mts2mtc) Status(ctx context.Context, r *pb.StatusRequest, opts ...grpc.CallOption) (*pb.StatusResponse, error) {
+ return s.mts.Status(ctx, r)
+}
+
+func (s *mts2mtc) Defragment(ctx context.Context, dr *pb.DefragmentRequest, opts ...grpc.CallOption) (*pb.DefragmentResponse, error) {
+ return s.mts.Defragment(ctx, dr)
+}
+
+func (s *mts2mtc) Hash(ctx context.Context, r *pb.HashRequest, opts ...grpc.CallOption) (*pb.HashResponse, error) {
+ return s.mts.Hash(ctx, r)
+}
+
+func (s *mts2mtc) HashKV(ctx context.Context, r *pb.HashKVRequest, opts ...grpc.CallOption) (*pb.HashKVResponse, error) {
+ return s.mts.HashKV(ctx, r)
+}
+
+func (s *mts2mtc) MoveLeader(ctx context.Context, r *pb.MoveLeaderRequest, opts ...grpc.CallOption) (*pb.MoveLeaderResponse, error) {
+ return s.mts.MoveLeader(ctx, r)
+}
+
+func (s *mts2mtc) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (pb.Maintenance_SnapshotClient, error) {
+ cs := newPipeStream(ctx, func(ss chanServerStream) error {
+ return s.mts.Snapshot(in, &ss2scServerStream{ss})
+ })
+ return &ss2scClientStream{cs}, nil
+}
+
+// ss2scClientStream implements Maintenance_SnapshotClient
+type ss2scClientStream struct{ chanClientStream }
+
+// ss2scServerStream implements Maintenance_SnapshotServer
+type ss2scServerStream struct{ chanServerStream }
+
+func (s *ss2scClientStream) Send(rr *pb.SnapshotRequest) error {
+ return s.SendMsg(rr)
+}
+func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) {
+ var v interface{}
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*pb.SnapshotResponse), nil
+}
+
+func (s *ss2scServerStream) Send(rr *pb.SnapshotResponse) error {
+ return s.SendMsg(rr)
+}
+func (s *ss2scServerStream) Recv() (*pb.SnapshotRequest, error) {
+ var v interface{}
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*pb.SnapshotRequest), nil
+}
diff --git a/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go
new file mode 100644
index 000000000000..2f629cc15633
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go
@@ -0,0 +1,66 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+ "errors"
+
+ pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+ "google.golang.org/grpc"
+)
+
+var errAlreadySentHeader = errors.New("adapter: already sent header")
+
+type ws2wc struct{ wserv pb.WatchServer }
+
+func WatchServerToWatchClient(wserv pb.WatchServer) pb.WatchClient {
+ return &ws2wc{wserv}
+}
+
+func (s *ws2wc) Watch(ctx context.Context, opts ...grpc.CallOption) (pb.Watch_WatchClient, error) {
+ cs := newPipeStream(ctx, func(ss chanServerStream) error {
+ return s.wserv.Watch(&ws2wcServerStream{ss})
+ })
+ return &ws2wcClientStream{cs}, nil
+}
+
+// ws2wcClientStream implements Watch_WatchClient
+type ws2wcClientStream struct{ chanClientStream }
+
+// ws2wcServerStream implements Watch_WatchServer
+type ws2wcServerStream struct{ chanServerStream }
+
+func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error {
+ return s.SendMsg(wr)
+}
+func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) {
+ var v interface{}
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*pb.WatchResponse), nil
+}
+
+func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error {
+ return s.SendMsg(wr)
+}
+func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) {
+ var v interface{}
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*pb.WatchRequest), nil
+}
diff --git a/vendor/go.etcd.io/etcd/version/version.go b/vendor/go.etcd.io/etcd/version/version.go
new file mode 100644
index 000000000000..ce2acaef1d60
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/version/version.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package version implements etcd version parsing and contains latest version
+// information.
+package version
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/coreos/go-semver/semver"
+)
+
+var (
+ // MinClusterVersion is the min cluster version this etcd binary is compatible with.
+ MinClusterVersion = "3.0.0"
+ Version = "3.4.9"
+ APIVersion = "unknown"
+
+ // Git SHA Value will be set during build
+ GitSHA = "Not provided (use ./build instead of go build)"
+)
+
+func init() {
+ ver, err := semver.NewVersion(Version)
+ if err == nil {
+ APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
+ }
+}
+
+type Versions struct {
+ Server string `json:"etcdserver"`
+ Cluster string `json:"etcdcluster"`
+ // TODO: raft state machine version
+}
+
+// Cluster only keeps the major.minor.
+func Cluster(v string) string {
+ vs := strings.Split(v, ".")
+ if len(vs) <= 2 {
+ return v
+ }
+ return fmt.Sprintf("%s.%s", vs[0], vs[1])
+}
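
For instance, with the values above:

	package main

	import (
		"fmt"

		"go.etcd.io/etcd/version"
	)

	func main() {
		fmt.Println(version.Cluster("3.4.9")) // 3.4
		fmt.Println(version.Cluster("3.4"))   // 3.4 (already major.minor)
		fmt.Println(version.APIVersion)       // 3.4, derived from Version in init()
	}
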
diff --git a/vendor/go.etcd.io/etcd/wal/decoder.go b/vendor/go.etcd.io/etcd/wal/decoder.go
new file mode 100644
index 000000000000..f2f01fd881c4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/wal/decoder.go
@@ -0,0 +1,188 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "bufio"
+ "encoding/binary"
+ "hash"
+ "io"
+ "sync"
+
+ "go.etcd.io/etcd/pkg/crc"
+ "go.etcd.io/etcd/pkg/pbutil"
+ "go.etcd.io/etcd/raft/raftpb"
+ "go.etcd.io/etcd/wal/walpb"
+)
+
+const minSectorSize = 512
+
+// frameSizeBytes is frame size in bytes, including record size and padding size.
+const frameSizeBytes = 8
+
+type decoder struct {
+ mu sync.Mutex
+ brs []*bufio.Reader
+
+ // lastValidOff file offset following the last valid decoded record
+ lastValidOff int64
+ crc hash.Hash32
+}
+
+func newDecoder(r ...io.Reader) *decoder {
+ readers := make([]*bufio.Reader, len(r))
+ for i := range r {
+ readers[i] = bufio.NewReader(r[i])
+ }
+ return &decoder{
+ brs: readers,
+ crc: crc.New(0, crcTable),
+ }
+}
+
+func (d *decoder) decode(rec *walpb.Record) error {
+ rec.Reset()
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ return d.decodeRecord(rec)
+}
+
+func (d *decoder) decodeRecord(rec *walpb.Record) error {
+ if len(d.brs) == 0 {
+ return io.EOF
+ }
+
+ l, err := readInt64(d.brs[0])
+ if err == io.EOF || (err == nil && l == 0) {
+ // hit end of file or preallocated space
+ d.brs = d.brs[1:]
+ if len(d.brs) == 0 {
+ return io.EOF
+ }
+ d.lastValidOff = 0
+ return d.decodeRecord(rec)
+ }
+ if err != nil {
+ return err
+ }
+
+ recBytes, padBytes := decodeFrameSize(l)
+
+ data := make([]byte, recBytes+padBytes)
+ if _, err = io.ReadFull(d.brs[0], data); err != nil {
+		// ReadFull returns io.EOF only if no bytes were read;
+		// the decoder should treat this as an ErrUnexpectedEOF instead.
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+ }
+ if err := rec.Unmarshal(data[:recBytes]); err != nil {
+ if d.isTornEntry(data) {
+ return io.ErrUnexpectedEOF
+ }
+ return err
+ }
+
+ // skip crc checking if the record type is crcType
+ if rec.Type != crcType {
+ d.crc.Write(rec.Data)
+ if err := rec.Validate(d.crc.Sum32()); err != nil {
+ if d.isTornEntry(data) {
+ return io.ErrUnexpectedEOF
+ }
+ return err
+ }
+ }
+ // record decoded as valid; point last valid offset to end of record
+ d.lastValidOff += frameSizeBytes + recBytes + padBytes
+ return nil
+}
+
+func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) {
+ // the record size is stored in the lower 56 bits of the 64-bit length
+ recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56))
+ // non-zero padding is indicated by set MSb / a negative length
+ if lenField < 0 {
+ // padding is stored in lower 3 bits of length MSB
+ padBytes = int64((uint64(lenField) >> 56) & 0x7)
+ }
+ return recBytes, padBytes
+}
+
+// isTornEntry determines whether the last entry of the WAL was partially written
+// and corrupted because of a torn write.
+func (d *decoder) isTornEntry(data []byte) bool {
+ if len(d.brs) != 1 {
+ return false
+ }
+
+ fileOff := d.lastValidOff + frameSizeBytes
+ curOff := 0
+ chunks := [][]byte{}
+ // split data on sector boundaries
+ for curOff < len(data) {
+ chunkLen := int(minSectorSize - (fileOff % minSectorSize))
+ if chunkLen > len(data)-curOff {
+ chunkLen = len(data) - curOff
+ }
+ chunks = append(chunks, data[curOff:curOff+chunkLen])
+ fileOff += int64(chunkLen)
+ curOff += chunkLen
+ }
+
+ // if any data for a sector chunk is all 0, it's a torn write
+ for _, sect := range chunks {
+ isZero := true
+ for _, v := range sect {
+ if v != 0 {
+ isZero = false
+ break
+ }
+ }
+ if isZero {
+ return true
+ }
+ }
+ return false
+}
+
+func (d *decoder) updateCRC(prevCrc uint32) {
+ d.crc = crc.New(prevCrc, crcTable)
+}
+
+func (d *decoder) lastCRC() uint32 {
+ return d.crc.Sum32()
+}
+
+func (d *decoder) lastOffset() int64 { return d.lastValidOff }
+
+func mustUnmarshalEntry(d []byte) raftpb.Entry {
+ var e raftpb.Entry
+ pbutil.MustUnmarshal(&e, d)
+ return e
+}
+
+func mustUnmarshalState(d []byte) raftpb.HardState {
+ var s raftpb.HardState
+ pbutil.MustUnmarshal(&s, d)
+ return s
+}
+
+func readInt64(r io.Reader) (int64, error) {
+ var n int64
+ err := binary.Read(r, binary.LittleEndian, &n)
+ return n, err
+}
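
The inverse of decodeFrameSize lives in encoder.go (also added by this diff); shown here as a standalone sketch of the round trip, since it makes the bit layout concrete:

	// encodeFrameSize mirrors decodeFrameSize. For an 11-byte record,
	// padBytes = (8-(11%8))%8 = 5, so the top byte becomes 0x85 (MSb set,
	// pad width 5 in the low 3 bits) while the low 56 bits stay 11.
	func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) {
		lenField = uint64(dataBytes)
		// force 8-byte alignment so the length field is never torn
		padBytes = (8 - (dataBytes % 8)) % 8
		if padBytes != 0 {
			lenField |= uint64(0x80|padBytes) << 56
		}
		return lenField, padBytes
	}
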
diff --git a/vendor/go.etcd.io/etcd/wal/doc.go b/vendor/go.etcd.io/etcd/wal/doc.go
new file mode 100644
index 000000000000..7ea348e4a962
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/wal/doc.go
@@ -0,0 +1,75 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package wal provides an implementation of a write ahead log that is used by
+etcd.
+
+A WAL is created at a particular directory and is made up of a number of
+segmented WAL files. Inside each file, the raft state and entries are appended
+with the Save method:
+
+ metadata := []byte{}
+ w, err := wal.Create(zap.NewExample(), "/var/lib/etcd", metadata)
+ ...
+ err := w.Save(s, ents)
+
+After saving a raft snapshot to disk, the SaveSnapshot method should be called
+to record it, so that the WAL can be matched against the saved snapshot when
+restarting.
+
+ err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2})
+
+When a user has finished using a WAL it must be closed:
+
+ w.Close()
+
+Each WAL file is a stream of WAL records. A WAL record is a length field and a wal record
+protobuf. The record protobuf contains a CRC, a type, and a data payload. The length field is a
+64-bit packed structure holding the length of the remaining logical record data in its lower
+56 bits and its physical padding in the lowest three bits of the most significant byte. Each
+record is 8-byte aligned so that the length field is never torn. The CRC contains the CRC32
+value of all record protobufs preceding the current record.
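+
+For example, a 13-byte record protobuf is padded to 16 bytes on disk, so its
+length field encodes a record length of 13 and a padding size of 3.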
+
+WAL files are placed inside of the directory in the following format:
+$seq-$index.wal
+
+The first WAL file to be created will be 0000000000000000-0000000000000000.wal
+indicating an initial sequence of 0 and an initial raft index of 0. The first
+entry written to WAL MUST have raft index 0.
+
+WAL will cut its current tail wal file if its size exceeds 64MB. This will increment an internal
+sequence number and cause a new file to be created. If the last raft index saved
+was 0x20 and this is the first time cut has been called on this WAL, the sequence will
+increment from 0x0 to 0x1, and the new file will be 0000000000000001-0000000000000021.wal.
+If a second cut occurs after 0x10 more entries have been written, the file will be called
+0000000000000002-0000000000000031.wal.
+
+At a later time a WAL can be opened at a particular snapshot. If there is no
+snapshot, an empty snapshot should be passed in.
+
+ w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2})
+ ...
+
+The snapshot must have been written to the WAL.
+
+Additional items cannot be Saved to this WAL until all of the items from the given
+snapshot to the end of the WAL are read first:
+
+ metadata, state, ents, err := w.ReadAll()
+
+This will give you the metadata, the last raft.State and the slice of
+raft.Entry items in the log.
+
+*/
+package wal
diff --git a/vendor/go.etcd.io/etcd/wal/encoder.go b/vendor/go.etcd.io/etcd/wal/encoder.go
new file mode 100644
index 000000000000..4de853b69a9f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/wal/encoder.go
@@ -0,0 +1,124 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "encoding/binary"
+ "hash"
+ "io"
+ "os"
+ "sync"
+
+ "go.etcd.io/etcd/pkg/crc"
+ "go.etcd.io/etcd/pkg/ioutil"
+ "go.etcd.io/etcd/wal/walpb"
+)
+
+// walPageBytes is the alignment for flushing records to the backing Writer.
+// It should be a multiple of the minimum sector size so that WAL can safely
+// distinguish between torn writes and ordinary data corruption.
+const walPageBytes = 8 * minSectorSize
+
+type encoder struct {
+ mu sync.Mutex
+ bw *ioutil.PageWriter
+
+ crc hash.Hash32
+ buf []byte
+ uint64buf []byte
+}
+
+func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder {
+ return &encoder{
+ bw: ioutil.NewPageWriter(w, walPageBytes, pageOffset),
+ crc: crc.New(prevCrc, crcTable),
+ // 1MB buffer
+ buf: make([]byte, 1024*1024),
+ uint64buf: make([]byte, 8),
+ }
+}
+
+// newFileEncoder creates a new encoder with current file offset for the page writer.
+func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) {
+ offset, err := f.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return nil, err
+ }
+ return newEncoder(f, prevCrc, int(offset)), nil
+}
+
+func (e *encoder) encode(rec *walpb.Record) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ e.crc.Write(rec.Data)
+ rec.Crc = e.crc.Sum32()
+ var (
+ data []byte
+ err error
+ n int
+ )
+
+ if rec.Size() > len(e.buf) {
+ data, err = rec.Marshal()
+ if err != nil {
+ return err
+ }
+ } else {
+ n, err = rec.MarshalTo(e.buf)
+ if err != nil {
+ return err
+ }
+ data = e.buf[:n]
+ }
+
+ lenField, padBytes := encodeFrameSize(len(data))
+ if err = writeUint64(e.bw, lenField, e.uint64buf); err != nil {
+ return err
+ }
+
+ if padBytes != 0 {
+ data = append(data, make([]byte, padBytes)...)
+ }
+ n, err = e.bw.Write(data)
+ walWriteBytes.Add(float64(n))
+ return err
+}
+
+func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) {
+ lenField = uint64(dataBytes)
+ // force 8 byte alignment so length never gets a torn write
+ padBytes = (8 - (dataBytes % 8)) % 8
+ if padBytes != 0 {
+ lenField |= uint64(0x80|padBytes) << 56
+ }
+ return lenField, padBytes
+}
+
+func (e *encoder) flush() error {
+ e.mu.Lock()
+ n, err := e.bw.FlushN()
+ e.mu.Unlock()
+ walWriteBytes.Add(float64(n))
+ return err
+}
+
+func writeUint64(w io.Writer, n uint64, buf []byte) error {
+ // http://golang.org/src/encoding/binary/binary.go
+ binary.LittleEndian.PutUint64(buf, n)
+ nv, err := w.Write(buf)
+ walWriteBytes.Add(float64(nv))
+ return err
+}
diff --git a/vendor/go.etcd.io/etcd/wal/file_pipeline.go b/vendor/go.etcd.io/etcd/wal/file_pipeline.go
new file mode 100644
index 000000000000..e1e1c557b8bb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/wal/file_pipeline.go
@@ -0,0 +1,106 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "go.etcd.io/etcd/pkg/fileutil"
+
+ "go.uber.org/zap"
+)
+
+// filePipeline pipelines allocating disk space
+type filePipeline struct {
+ lg *zap.Logger
+
+ // dir to put files
+ dir string
+ // size of files to make, in bytes
+ size int64
+ // count number of files generated
+ count int
+
+ filec chan *fileutil.LockedFile
+ errc chan error
+ donec chan struct{}
+}
+
+func newFilePipeline(lg *zap.Logger, dir string, fileSize int64) *filePipeline {
+ fp := &filePipeline{
+ lg: lg,
+ dir: dir,
+ size: fileSize,
+ filec: make(chan *fileutil.LockedFile),
+ errc: make(chan error, 1),
+ donec: make(chan struct{}),
+ }
+ go fp.run()
+ return fp
+}
+
+// Open returns a fresh file for writing. Rename the file before calling
+// Open again or there will be file collisions.
+func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) {
+ select {
+ case f = <-fp.filec:
+ case err = <-fp.errc:
+ }
+ return f, err
+}
+
+func (fp *filePipeline) Close() error {
+ close(fp.donec)
+ return <-fp.errc
+}
+
+func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) {
+ // count % 2 so this file isn't the same as the one last published
+ fpath := filepath.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2))
+ if f, err = fileutil.LockFile(fpath, os.O_CREATE|os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
+ return nil, err
+ }
+ if err = fileutil.Preallocate(f.File, fp.size, true); err != nil {
+ if fp.lg != nil {
+ fp.lg.Warn("failed to preallocate space when creating a new WAL", zap.Int64("size", fp.size), zap.Error(err))
+ } else {
+ plog.Errorf("failed to allocate space when creating new wal file (%v)", err)
+ }
+ f.Close()
+ return nil, err
+ }
+ fp.count++
+ return f, nil
+}
+
+func (fp *filePipeline) run() {
+ defer close(fp.errc)
+ for {
+ f, err := fp.alloc()
+ if err != nil {
+ fp.errc <- err
+ return
+ }
+ select {
+ case fp.filec <- f:
+ case <-fp.donec:
+ os.Remove(f.Name())
+ f.Close()
+ return
+ }
+ }
+}
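+
+// An illustrative caller, mirroring how the WAL itself uses the pipeline:
+//
+// fp := newFilePipeline(lg, dir, SegmentSizeBytes)
+// f, err := fp.Open() // receives a preallocated, locked tmp file
+// ...                 // write to f, then rename it into place
+// err = fp.Close()    // stops the background allocator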
diff --git a/vendor/go.etcd.io/etcd/wal/metrics.go b/vendor/go.etcd.io/etcd/wal/metrics.go
new file mode 100644
index 000000000000..814d654cdd30
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/wal/metrics.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ walFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "wal_fsync_duration_seconds",
+ Help: "The latency distributions of fsync called by WAL.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ walWriteBytes = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "wal_write_bytes_total",
+ Help: "Total number of bytes written in WAL.",
+ })
+)
+
+func init() {
+ prometheus.MustRegister(walFsyncSec)
+ prometheus.MustRegister(walWriteBytes)
+}
diff --git a/vendor/go.etcd.io/etcd/wal/repair.go b/vendor/go.etcd.io/etcd/wal/repair.go
new file mode 100644
index 000000000000..5c7c5d1759f7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/wal/repair.go
@@ -0,0 +1,143 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "io"
+ "os"
+ "path/filepath"
+ "time"
+
+ "go.etcd.io/etcd/pkg/fileutil"
+ "go.etcd.io/etcd/wal/walpb"
+ "go.uber.org/zap"
+)
+
+// Repair tries to repair ErrUnexpectedEOF in the
+// last wal file by truncating.
+func Repair(lg *zap.Logger, dirpath string) bool {
+ f, err := openLast(lg, dirpath)
+ if err != nil {
+ return false
+ }
+ defer f.Close()
+
+ if lg != nil {
+ lg.Info("repairing", zap.String("path", f.Name()))
+ } else {
+ plog.Noticef("repairing %v", f.Name())
+ }
+
+ rec := &walpb.Record{}
+ decoder := newDecoder(f)
+ for {
+ lastOffset := decoder.lastOffset()
+ err := decoder.decode(rec)
+ switch err {
+ case nil:
+ // update crc of the decoder when necessary
+ switch rec.Type {
+ case crcType:
+ crc := decoder.crc.Sum32()
+				// the current crc of the decoder must match the crc of the record.
+				// no need to match a 0 crc, since the decoder is a new one in this case.
+ if crc != 0 && rec.Validate(crc) != nil {
+ return false
+ }
+ decoder.updateCRC(rec.Crc)
+ }
+ continue
+
+ case io.EOF:
+ if lg != nil {
+ lg.Info("repaired", zap.String("path", f.Name()), zap.Error(io.EOF))
+ }
+ return true
+
+ case io.ErrUnexpectedEOF:
+ bf, bferr := os.Create(f.Name() + ".broken")
+ if bferr != nil {
+ if lg != nil {
+ lg.Warn("failed to create backup file", zap.String("path", f.Name()+".broken"), zap.Error(bferr))
+ } else {
+ plog.Errorf("could not repair %v, failed to create backup file", f.Name())
+ }
+ return false
+ }
+ defer bf.Close()
+
+ if _, err = f.Seek(0, io.SeekStart); err != nil {
+ if lg != nil {
+ lg.Warn("failed to read file", zap.String("path", f.Name()), zap.Error(err))
+ } else {
+ plog.Errorf("could not repair %v, failed to read file", f.Name())
+ }
+ return false
+ }
+
+ if _, err = io.Copy(bf, f); err != nil {
+ if lg != nil {
+ lg.Warn("failed to copy", zap.String("from", f.Name()+".broken"), zap.String("to", f.Name()), zap.Error(err))
+ } else {
+ plog.Errorf("could not repair %v, failed to copy file", f.Name())
+ }
+ return false
+ }
+
+ if err = f.Truncate(lastOffset); err != nil {
+ if lg != nil {
+ lg.Warn("failed to truncate", zap.String("path", f.Name()), zap.Error(err))
+ } else {
+ plog.Errorf("could not repair %v, failed to truncate file", f.Name())
+ }
+ return false
+ }
+
+ start := time.Now()
+ if err = fileutil.Fsync(f.File); err != nil {
+ if lg != nil {
+ lg.Warn("failed to fsync", zap.String("path", f.Name()), zap.Error(err))
+ } else {
+ plog.Errorf("could not repair %v, failed to sync file", f.Name())
+ }
+ return false
+ }
+ walFsyncSec.Observe(time.Since(start).Seconds())
+
+ if lg != nil {
+ lg.Info("repaired", zap.String("path", f.Name()), zap.Error(io.ErrUnexpectedEOF))
+ }
+ return true
+
+ default:
+ if lg != nil {
+ lg.Warn("failed to repair", zap.String("path", f.Name()), zap.Error(err))
+ } else {
+ plog.Errorf("could not repair error (%v)", err)
+ }
+ return false
+ }
+ }
+}
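+
+// One possible recovery flow (illustrative sketch): a caller whose ReadAll
+// fails with io.ErrUnexpectedEOF may close the WAL, call Repair, and reopen:
+//
+// if _, _, _, err := w.ReadAll(); err == io.ErrUnexpectedEOF {
+//     w.Close()
+//     if !wal.Repair(lg, dirpath) {
+//         return err // unrecoverable corruption
+//     }
+//     w, err = wal.Open(lg, dirpath, snap)
+// }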
+
+// openLast opens the last wal file for read and write.
+func openLast(lg *zap.Logger, dirpath string) (*fileutil.LockedFile, error) {
+ names, err := readWALNames(lg, dirpath)
+ if err != nil {
+ return nil, err
+ }
+ last := filepath.Join(dirpath, names[len(names)-1])
+ return fileutil.LockFile(last, os.O_RDWR, fileutil.PrivateFileMode)
+}
diff --git a/vendor/go.etcd.io/etcd/wal/util.go b/vendor/go.etcd.io/etcd/wal/util.go
new file mode 100644
index 000000000000..a3f314bb1268
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/wal/util.go
@@ -0,0 +1,124 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "go.etcd.io/etcd/pkg/fileutil"
+
+ "go.uber.org/zap"
+)
+
+var errBadWALName = errors.New("bad wal name")
+
+// Exist returns true if there are any files in a given directory.
+func Exist(dir string) bool {
+ names, err := fileutil.ReadDir(dir, fileutil.WithExt(".wal"))
+ if err != nil {
+ return false
+ }
+ return len(names) != 0
+}
+
+// searchIndex returns the last array index of names whose raft index section is
+// equal to or smaller than the given index.
+// The given names MUST be sorted.
+func searchIndex(lg *zap.Logger, names []string, index uint64) (int, bool) {
+ for i := len(names) - 1; i >= 0; i-- {
+ name := names[i]
+ _, curIndex, err := parseWALName(name)
+ if err != nil {
+ if lg != nil {
+ lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err))
+ } else {
+ plog.Panicf("parse correct name should never fail: %v", err)
+ }
+ }
+ if index >= curIndex {
+ return i, true
+ }
+ }
+ return -1, false
+}
+
+// isValidSeq checks whether the sequence numbers increase continuously.
+// names should have been sorted by sequence number.
+func isValidSeq(lg *zap.Logger, names []string) bool {
+ var lastSeq uint64
+ for _, name := range names {
+ curSeq, _, err := parseWALName(name)
+ if err != nil {
+ if lg != nil {
+ lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err))
+ } else {
+ plog.Panicf("parse correct name should never fail: %v", err)
+ }
+ }
+ if lastSeq != 0 && lastSeq != curSeq-1 {
+ return false
+ }
+ lastSeq = curSeq
+ }
+ return true
+}
+
+func readWALNames(lg *zap.Logger, dirpath string) ([]string, error) {
+ names, err := fileutil.ReadDir(dirpath)
+ if err != nil {
+ return nil, err
+ }
+ wnames := checkWalNames(lg, names)
+ if len(wnames) == 0 {
+ return nil, ErrFileNotFound
+ }
+ return wnames, nil
+}
+
+func checkWalNames(lg *zap.Logger, names []string) []string {
+ wnames := make([]string, 0)
+ for _, name := range names {
+ if _, _, err := parseWALName(name); err != nil {
+			// don't complain about leftover tmp files
+ if !strings.HasSuffix(name, ".tmp") {
+ if lg != nil {
+ lg.Warn(
+ "ignored file in WAL directory",
+ zap.String("path", name),
+ )
+ } else {
+ plog.Warningf("ignored file %v in wal", name)
+ }
+ }
+ continue
+ }
+ wnames = append(wnames, name)
+ }
+ return wnames
+}
+
+func parseWALName(str string) (seq, index uint64, err error) {
+ if !strings.HasSuffix(str, ".wal") {
+ return 0, 0, errBadWALName
+ }
+ _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index)
+ return seq, index, err
+}
+
+func walName(seq, index uint64) string {
+ return fmt.Sprintf("%016x-%016x.wal", seq, index)
+}
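+
+// For example, walName(1, 0x21) returns "0000000000000001-0000000000000021.wal",
+// and parseWALName on that name recovers seq=1 and index=0x21.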
diff --git a/vendor/go.etcd.io/etcd/wal/wal.go b/vendor/go.etcd.io/etcd/wal/wal.go
new file mode 100644
index 000000000000..0451f94d0588
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/wal/wal.go
@@ -0,0 +1,985 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/pkg/fileutil"
+ "go.etcd.io/etcd/pkg/pbutil"
+ "go.etcd.io/etcd/raft"
+ "go.etcd.io/etcd/raft/raftpb"
+ "go.etcd.io/etcd/wal/walpb"
+
+ "github.com/coreos/pkg/capnslog"
+ "go.uber.org/zap"
+)
+
+const (
+ metadataType int64 = iota + 1
+ entryType
+ stateType
+ crcType
+ snapshotType
+
+ // warnSyncDuration is the amount of time allotted to an fsync before
+ // logging a warning
+ warnSyncDuration = time.Second
+)
+
+var (
+ // SegmentSizeBytes is the preallocated size of each wal segment file.
+ // The actual size might be larger than this. In general, the default
+ // value should be used, but this is defined as an exported variable
+ // so that tests can set a different segment size.
+ SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB
+
+ plog = capnslog.NewPackageLogger("go.etcd.io/etcd", "wal")
+
+ ErrMetadataConflict = errors.New("wal: conflicting metadata found")
+ ErrFileNotFound = errors.New("wal: file not found")
+ ErrCRCMismatch = errors.New("wal: crc mismatch")
+ ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
+ ErrSnapshotNotFound = errors.New("wal: snapshot not found")
+ crcTable = crc32.MakeTable(crc32.Castagnoli)
+)
+
+// WAL is a logical representation of the stable storage.
+// WAL is either in read mode or append mode but not both.
+// A newly created WAL is in append mode, and ready for appending records.
+// A just opened WAL is in read mode, and ready for reading records.
+// The WAL will be ready for appending after reading out all the previous records.
+type WAL struct {
+ lg *zap.Logger
+
+ dir string // the living directory of the underlay files
+
+ // dirFile is a fd for the wal directory for syncing on Rename
+ dirFile *os.File
+
+ metadata []byte // metadata recorded at the head of each WAL
+ state raftpb.HardState // hardstate recorded at the head of WAL
+
+ start walpb.Snapshot // snapshot to start reading
+ decoder *decoder // decoder to decode records
+ readClose func() error // closer for decode reader
+
+ mu sync.Mutex
+ enti uint64 // index of the last entry saved to the wal
+ encoder *encoder // encoder to encode records
+
+ locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing)
+ fp *filePipeline
+}
+
+// Create creates a WAL ready for appending records. The given metadata is
+// recorded at the head of each WAL file, and can be retrieved with ReadAll.
+func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) {
+ if Exist(dirpath) {
+ return nil, os.ErrExist
+ }
+
+ // keep temporary wal directory so WAL initialization appears atomic
+ tmpdirpath := filepath.Clean(dirpath) + ".tmp"
+ if fileutil.Exist(tmpdirpath) {
+ if err := os.RemoveAll(tmpdirpath); err != nil {
+ return nil, err
+ }
+ }
+ if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to create a temporary WAL directory",
+ zap.String("tmp-dir-path", tmpdirpath),
+ zap.String("dir-path", dirpath),
+ zap.Error(err),
+ )
+ }
+ return nil, err
+ }
+
+ p := filepath.Join(tmpdirpath, walName(0, 0))
+ f, err := fileutil.LockFile(p, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
+ if err != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to flock an initial WAL file",
+ zap.String("path", p),
+ zap.Error(err),
+ )
+ }
+ return nil, err
+ }
+ if _, err = f.Seek(0, io.SeekEnd); err != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to seek an initial WAL file",
+ zap.String("path", p),
+ zap.Error(err),
+ )
+ }
+ return nil, err
+ }
+ if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to preallocate an initial WAL file",
+ zap.String("path", p),
+ zap.Int64("segment-bytes", SegmentSizeBytes),
+ zap.Error(err),
+ )
+ }
+ return nil, err
+ }
+
+ w := &WAL{
+ lg: lg,
+ dir: dirpath,
+ metadata: metadata,
+ }
+ w.encoder, err = newFileEncoder(f.File, 0)
+ if err != nil {
+ return nil, err
+ }
+ w.locks = append(w.locks, f)
+ if err = w.saveCrc(0); err != nil {
+ return nil, err
+ }
+ if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
+ return nil, err
+ }
+ if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
+ return nil, err
+ }
+
+ if w, err = w.renameWAL(tmpdirpath); err != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to rename the temporary WAL directory",
+ zap.String("tmp-dir-path", tmpdirpath),
+ zap.String("dir-path", w.dir),
+ zap.Error(err),
+ )
+ }
+ return nil, err
+ }
+
+ var perr error
+ defer func() {
+ if perr != nil {
+ w.cleanupWAL(lg)
+ }
+ }()
+
+ // directory was renamed; sync parent dir to persist rename
+ pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir))
+ if perr != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to open the parent data directory",
+ zap.String("parent-dir-path", filepath.Dir(w.dir)),
+ zap.String("dir-path", w.dir),
+ zap.Error(perr),
+ )
+ }
+ return nil, perr
+ }
+ start := time.Now()
+ if perr = fileutil.Fsync(pdir); perr != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to fsync the parent data directory file",
+ zap.String("parent-dir-path", filepath.Dir(w.dir)),
+ zap.String("dir-path", w.dir),
+ zap.Error(perr),
+ )
+ }
+ return nil, perr
+ }
+ walFsyncSec.Observe(time.Since(start).Seconds())
+
+ if perr = pdir.Close(); perr != nil {
+ if lg != nil {
+ lg.Warn(
+ "failed to close the parent data directory file",
+ zap.String("parent-dir-path", filepath.Dir(w.dir)),
+ zap.String("dir-path", w.dir),
+ zap.Error(perr),
+ )
+ }
+ return nil, perr
+ }
+
+ return w, nil
+}
+
+func (w *WAL) cleanupWAL(lg *zap.Logger) {
+ var err error
+ if err = w.Close(); err != nil {
+ if lg != nil {
+ lg.Panic("failed to close WAL during cleanup", zap.Error(err))
+ } else {
+ plog.Panicf("failed to close WAL during cleanup: %v", err)
+ }
+ }
+ brokenDirName := fmt.Sprintf("%s.broken.%v", w.dir, time.Now().Format("20060102.150405.999999"))
+ if err = os.Rename(w.dir, brokenDirName); err != nil {
+ if lg != nil {
+ lg.Panic(
+ "failed to rename WAL during cleanup",
+ zap.Error(err),
+ zap.String("source-path", w.dir),
+ zap.String("rename-path", brokenDirName),
+ )
+ } else {
+ plog.Panicf("failed to rename WAL during cleanup: %v", err)
+ }
+ }
+}
+
+func (w *WAL) renameWAL(tmpdirpath string) (*WAL, error) {
+ if err := os.RemoveAll(w.dir); err != nil {
+ return nil, err
+ }
+ // On non-Windows platforms, hold the lock while renaming. Releasing
+ // the lock and trying to reacquire it quickly can be flaky because
+ // it's possible the process will fork to spawn a process while this is
+ // happening. The fds are set up as close-on-exec by the Go runtime,
+ // but there is a window between the fork and the exec where another
+ // process holds the lock.
+ if err := os.Rename(tmpdirpath, w.dir); err != nil {
+ if _, ok := err.(*os.LinkError); ok {
+ return w.renameWALUnlock(tmpdirpath)
+ }
+ return nil, err
+ }
+ w.fp = newFilePipeline(w.lg, w.dir, SegmentSizeBytes)
+ df, err := fileutil.OpenDir(w.dir)
+ w.dirFile = df
+ return w, err
+}
+
+func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) {
+ // rename of directory with locked files doesn't work on windows/cifs;
+ // close the WAL to release the locks so the directory can be renamed.
+ if w.lg != nil {
+ w.lg.Info(
+ "closing WAL to release flock and retry directory renaming",
+ zap.String("from", tmpdirpath),
+ zap.String("to", w.dir),
+ )
+ } else {
+ plog.Infof("releasing file lock to rename %q to %q", tmpdirpath, w.dir)
+ }
+ w.Close()
+
+ if err := os.Rename(tmpdirpath, w.dir); err != nil {
+ return nil, err
+ }
+
+ // reopen and relock
+ newWAL, oerr := Open(w.lg, w.dir, walpb.Snapshot{})
+ if oerr != nil {
+ return nil, oerr
+ }
+ if _, _, _, err := newWAL.ReadAll(); err != nil {
+ newWAL.Close()
+ return nil, err
+ }
+ return newWAL, nil
+}
+
+// Open opens the WAL at the given snap.
+// The snap SHOULD have been previously saved to the WAL, or the following
+// ReadAll will fail.
+// The returned WAL is ready to read and the first record will be the one after
+// the given snap. The WAL cannot be appended to before reading out all of its
+// previous records.
+func Open(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) {
+ w, err := openAtIndex(lg, dirpath, snap, true)
+ if err != nil {
+ return nil, err
+ }
+ if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
+ return nil, err
+ }
+ return w, nil
+}
+
+// OpenForRead only opens the wal files for read.
+// Write on a read only wal panics.
+func OpenForRead(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) {
+ return openAtIndex(lg, dirpath, snap, false)
+}
+
+func openAtIndex(lg *zap.Logger, dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) {
+ names, nameIndex, err := selectWALFiles(lg, dirpath, snap)
+ if err != nil {
+ return nil, err
+ }
+
+ rs, ls, closer, err := openWALFiles(lg, dirpath, names, nameIndex, write)
+ if err != nil {
+ return nil, err
+ }
+
+ // create a WAL ready for reading
+ w := &WAL{
+ lg: lg,
+ dir: dirpath,
+ start: snap,
+ decoder: newDecoder(rs...),
+ readClose: closer,
+ locks: ls,
+ }
+
+ if write {
+ // write reuses the file descriptors from read; don't close so
+ // WAL can append without dropping the file lock
+ w.readClose = nil
+ if _, _, err := parseWALName(filepath.Base(w.tail().Name())); err != nil {
+ closer()
+ return nil, err
+ }
+ w.fp = newFilePipeline(lg, w.dir, SegmentSizeBytes)
+ }
+
+ return w, nil
+}
+
+func selectWALFiles(lg *zap.Logger, dirpath string, snap walpb.Snapshot) ([]string, int, error) {
+ names, err := readWALNames(lg, dirpath)
+ if err != nil {
+ return nil, -1, err
+ }
+
+ nameIndex, ok := searchIndex(lg, names, snap.Index)
+ if !ok || !isValidSeq(lg, names[nameIndex:]) {
+ err = ErrFileNotFound
+ return nil, -1, err
+ }
+
+ return names, nameIndex, nil
+}
+
+func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int, write bool) ([]io.Reader, []*fileutil.LockedFile, func() error, error) {
+ rcs := make([]io.ReadCloser, 0)
+ rs := make([]io.Reader, 0)
+ ls := make([]*fileutil.LockedFile, 0)
+ for _, name := range names[nameIndex:] {
+ p := filepath.Join(dirpath, name)
+ if write {
+ l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode)
+ if err != nil {
+ closeAll(rcs...)
+ return nil, nil, nil, err
+ }
+ ls = append(ls, l)
+ rcs = append(rcs, l)
+ } else {
+ rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode)
+ if err != nil {
+ closeAll(rcs...)
+ return nil, nil, nil, err
+ }
+ ls = append(ls, nil)
+ rcs = append(rcs, rf)
+ }
+ rs = append(rs, rcs[len(rcs)-1])
+ }
+
+ closer := func() error { return closeAll(rcs...) }
+
+ return rs, ls, closer, nil
+}
+
+// ReadAll reads out records of the current WAL.
+// If opened in write mode, it must read out all records until EOF, or an error
+// will be returned.
+// If opened in read mode, it will try to read all records if possible.
+// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
+// If the loaded snap doesn't match the expected one, it will return
+// all the records and the error ErrSnapshotMismatch.
+// TODO: detect not-last-snap error.
+// TODO: maybe loosen the matching check.
+// After ReadAll, the WAL will be ready for appending new records.
+func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ rec := &walpb.Record{}
+ decoder := w.decoder
+
+ var match bool
+ for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
+ switch rec.Type {
+ case entryType:
+ e := mustUnmarshalEntry(rec.Data)
+ if e.Index > w.start.Index {
+ ents = append(ents[:e.Index-w.start.Index-1], e)
+ }
+ w.enti = e.Index
+
+ case stateType:
+ state = mustUnmarshalState(rec.Data)
+
+ case metadataType:
+ if metadata != nil && !bytes.Equal(metadata, rec.Data) {
+ state.Reset()
+ return nil, state, nil, ErrMetadataConflict
+ }
+ metadata = rec.Data
+
+ case crcType:
+ crc := decoder.crc.Sum32()
+			// the current crc of the decoder must match the crc of the record.
+			// no need to match a 0 crc, since the decoder is a new one in this case.
+ if crc != 0 && rec.Validate(crc) != nil {
+ state.Reset()
+ return nil, state, nil, ErrCRCMismatch
+ }
+ decoder.updateCRC(rec.Crc)
+
+ case snapshotType:
+ var snap walpb.Snapshot
+ pbutil.MustUnmarshal(&snap, rec.Data)
+ if snap.Index == w.start.Index {
+ if snap.Term != w.start.Term {
+ state.Reset()
+ return nil, state, nil, ErrSnapshotMismatch
+ }
+ match = true
+ }
+
+ default:
+ state.Reset()
+ return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
+ }
+ }
+
+ switch w.tail() {
+ case nil:
+ // We do not have to read out all entries in read mode.
+		// The last record may be a partially written one, so
+		// io.ErrUnexpectedEOF might be returned.
+ if err != io.EOF && err != io.ErrUnexpectedEOF {
+ state.Reset()
+ return nil, state, nil, err
+ }
+ default:
+ // We must read all of the entries if WAL is opened in write mode.
+ if err != io.EOF {
+ state.Reset()
+ return nil, state, nil, err
+ }
+ // decodeRecord() will return io.EOF if it detects a zero record,
+ // but this zero record may be followed by non-zero records from
+ // a torn write. Overwriting some of these non-zero records, but
+ // not all, will cause CRC errors on WAL open. Since the records
+ // were never fully synced to disk in the first place, it's safe
+ // to zero them out to avoid any CRC errors from new writes.
+ if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil {
+ return nil, state, nil, err
+ }
+ if err = fileutil.ZeroToEnd(w.tail().File); err != nil {
+ return nil, state, nil, err
+ }
+ }
+
+ err = nil
+ if !match {
+ err = ErrSnapshotNotFound
+ }
+
+ // close decoder, disable reading
+ if w.readClose != nil {
+ w.readClose()
+ w.readClose = nil
+ }
+ w.start = walpb.Snapshot{}
+
+ w.metadata = metadata
+
+ if w.tail() != nil {
+ // create encoder (chain crc with the decoder), enable appending
+ w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC())
+ if err != nil {
+ return
+ }
+ }
+ w.decoder = nil
+
+ return metadata, state, ents, err
+}
+
+// ValidSnapshotEntries returns all the valid snapshot entries in the wal logs in the given directory.
+// Snapshot entries are valid if their index is less than or equal to the most recent committed hardstate.
+func ValidSnapshotEntries(lg *zap.Logger, walDir string) ([]walpb.Snapshot, error) {
+ var snaps []walpb.Snapshot
+ var state raftpb.HardState
+ var err error
+
+ rec := &walpb.Record{}
+ names, err := readWALNames(lg, walDir)
+ if err != nil {
+ return nil, err
+ }
+
+ // open wal files in read mode, so that there is no conflict
+ // when the same WAL is opened elsewhere in write mode
+ rs, _, closer, err := openWALFiles(lg, walDir, names, 0, false)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if closer != nil {
+ closer()
+ }
+ }()
+
+ // create a new decoder from the readers on the WAL files
+ decoder := newDecoder(rs...)
+
+ for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
+ switch rec.Type {
+ case snapshotType:
+ var loadedSnap walpb.Snapshot
+ pbutil.MustUnmarshal(&loadedSnap, rec.Data)
+ snaps = append(snaps, loadedSnap)
+ case stateType:
+ state = mustUnmarshalState(rec.Data)
+ case crcType:
+ crc := decoder.crc.Sum32()
+			// the current crc of the decoder must match the crc of the record.
+			// no need to match a 0 crc, since the decoder is a new one in this case.
+ if crc != 0 && rec.Validate(crc) != nil {
+ return nil, ErrCRCMismatch
+ }
+ decoder.updateCRC(rec.Crc)
+ }
+ }
+ // We do not have to read out all the WAL entries
+ // as the decoder is opened in read mode.
+ if err != io.EOF && err != io.ErrUnexpectedEOF {
+ return nil, err
+ }
+
+ // filter out any snaps that are newer than the committed hardstate
+ n := 0
+ for _, s := range snaps {
+ if s.Index <= state.Commit {
+ snaps[n] = s
+ n++
+ }
+ }
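+	// the full slice expression caps the capacity so that later appends
+	// cannot overwrite entries that were filtered out above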
+ snaps = snaps[:n:n]
+
+ return snaps, nil
+}
+
+// Verify reads through the given WAL and verifies that it is not corrupted.
+// It creates a new decoder to read through the records of the given WAL.
+// It does not conflict with any open WAL, but it is recommended not to
+// call this function after opening the WAL for writing.
+// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
+// If the loaded snap doesn't match with the expected one, it will
+// return error ErrSnapshotMismatch.
+func Verify(lg *zap.Logger, walDir string, snap walpb.Snapshot) error {
+ var metadata []byte
+ var err error
+ var match bool
+
+ rec := &walpb.Record{}
+
+ names, nameIndex, err := selectWALFiles(lg, walDir, snap)
+ if err != nil {
+ return err
+ }
+
+ // open wal files in read mode, so that there is no conflict
+ // when the same WAL is opened elsewhere in write mode
+ rs, _, closer, err := openWALFiles(lg, walDir, names, nameIndex, false)
+ if err != nil {
+ return err
+ }
+
+ // create a new decoder from the readers on the WAL files
+ decoder := newDecoder(rs...)
+
+ for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
+ switch rec.Type {
+ case metadataType:
+ if metadata != nil && !bytes.Equal(metadata, rec.Data) {
+ return ErrMetadataConflict
+ }
+ metadata = rec.Data
+ case crcType:
+ crc := decoder.crc.Sum32()
+ // Current crc of decoder must match the crc of the record.
+ // We need not match 0 crc, since the decoder is a new one at this point.
+ if crc != 0 && rec.Validate(crc) != nil {
+ return ErrCRCMismatch
+ }
+ decoder.updateCRC(rec.Crc)
+ case snapshotType:
+ var loadedSnap walpb.Snapshot
+ pbutil.MustUnmarshal(&loadedSnap, rec.Data)
+ if loadedSnap.Index == snap.Index {
+ if loadedSnap.Term != snap.Term {
+ return ErrSnapshotMismatch
+ }
+ match = true
+ }
+ // We ignore all entry and state type records as these
+ // are not necessary for validating the WAL contents
+ case entryType:
+ case stateType:
+ default:
+ return fmt.Errorf("unexpected block type %d", rec.Type)
+ }
+ }
+
+ if closer != nil {
+ closer()
+ }
+
+ // We do not have to read out all the WAL entries
+ // as the decoder is opened in read mode.
+ if err != io.EOF && err != io.ErrUnexpectedEOF {
+ return err
+ }
+
+ if !match {
+ return ErrSnapshotNotFound
+ }
+
+ return nil
+}
+
+// cut closes the currently written file and creates a new one ready for appending.
+// cut first creates a temp wal file and writes the necessary headers into it.
+// Then cut atomically renames the temp wal file to a wal file.
+func (w *WAL) cut() error {
+ // close old wal file; truncate to avoid wasting space if an early cut
+ off, serr := w.tail().Seek(0, io.SeekCurrent)
+ if serr != nil {
+ return serr
+ }
+
+ if err := w.tail().Truncate(off); err != nil {
+ return err
+ }
+
+ if err := w.sync(); err != nil {
+ return err
+ }
+
+ fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))
+
+ // create a temp wal file with name sequence + 1, or truncate the existing one
+ newTail, err := w.fp.Open()
+ if err != nil {
+ return err
+ }
+
+ // update writer and save the previous crc
+ w.locks = append(w.locks, newTail)
+ prevCrc := w.encoder.crc.Sum32()
+ w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+ if err != nil {
+ return err
+ }
+
+ if err = w.saveCrc(prevCrc); err != nil {
+ return err
+ }
+
+ if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
+ return err
+ }
+
+ if err = w.saveState(&w.state); err != nil {
+ return err
+ }
+
+ // atomically move temp wal file to wal file
+ if err = w.sync(); err != nil {
+ return err
+ }
+
+ off, err = w.tail().Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+
+ if err = os.Rename(newTail.Name(), fpath); err != nil {
+ return err
+ }
+ start := time.Now()
+ if err = fileutil.Fsync(w.dirFile); err != nil {
+ return err
+ }
+ walFsyncSec.Observe(time.Since(start).Seconds())
+
+ // reopen newTail with its new path so calls to Name() match the wal filename format
+ newTail.Close()
+
+ if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
+ return err
+ }
+ if _, err = newTail.Seek(off, io.SeekStart); err != nil {
+ return err
+ }
+
+ w.locks[len(w.locks)-1] = newTail
+
+ prevCrc = w.encoder.crc.Sum32()
+ w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+ if err != nil {
+ return err
+ }
+
+ if w.lg != nil {
+ w.lg.Info("created a new WAL segment", zap.String("path", fpath))
+ } else {
+ plog.Infof("segmented wal file %v is created", fpath)
+ }
+ return nil
+}
+
+func (w *WAL) sync() error {
+ if w.encoder != nil {
+ if err := w.encoder.flush(); err != nil {
+ return err
+ }
+ }
+ start := time.Now()
+ err := fileutil.Fdatasync(w.tail().File)
+
+ took := time.Since(start)
+ if took > warnSyncDuration {
+ if w.lg != nil {
+ w.lg.Warn(
+ "slow fdatasync",
+ zap.Duration("took", took),
+ zap.Duration("expected-duration", warnSyncDuration),
+ )
+ } else {
+ plog.Warningf("sync duration of %v, expected less than %v", took, warnSyncDuration)
+ }
+ }
+ walFsyncSec.Observe(took.Seconds())
+
+ return err
+}
+
+func (w *WAL) Sync() error {
+ return w.sync()
+}
+
+// ReleaseLockTo releases the locks whose index is smaller than the given
+// index, except for the largest one among them.
+// For example, if the WAL is holding locks 1,2,3,4,5,6, ReleaseLockTo(4) will release
+// locks 1 and 2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4.
+func (w *WAL) ReleaseLockTo(index uint64) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if len(w.locks) == 0 {
+ return nil
+ }
+
+ var smaller int
+ found := false
+ for i, l := range w.locks {
+ _, lockIndex, err := parseWALName(filepath.Base(l.Name()))
+ if err != nil {
+ return err
+ }
+ if lockIndex >= index {
+ smaller = i - 1
+ found = true
+ break
+ }
+ }
+
+	// if no lock index is greater than the release index, we can
+	// release locks up to the last one (exclusive).
+ if !found {
+ smaller = len(w.locks) - 1
+ }
+
+ if smaller <= 0 {
+ return nil
+ }
+
+ for i := 0; i < smaller; i++ {
+ if w.locks[i] == nil {
+ continue
+ }
+ w.locks[i].Close()
+ }
+ w.locks = w.locks[smaller:]
+
+ return nil
+}
+
+// Close closes the current WAL file and directory.
+func (w *WAL) Close() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.fp != nil {
+ w.fp.Close()
+ w.fp = nil
+ }
+
+ if w.tail() != nil {
+ if err := w.sync(); err != nil {
+ return err
+ }
+ }
+ for _, l := range w.locks {
+ if l == nil {
+ continue
+ }
+ if err := l.Close(); err != nil {
+ if w.lg != nil {
+ w.lg.Warn("failed to close WAL", zap.Error(err))
+ } else {
+ plog.Errorf("failed to unlock during closing wal: %s", err)
+ }
+ }
+ }
+
+ return w.dirFile.Close()
+}
+
+func (w *WAL) saveEntry(e *raftpb.Entry) error {
+ // TODO: add MustMarshalTo to reduce one allocation.
+ b := pbutil.MustMarshal(e)
+ rec := &walpb.Record{Type: entryType, Data: b}
+ if err := w.encoder.encode(rec); err != nil {
+ return err
+ }
+ w.enti = e.Index
+ return nil
+}
+
+func (w *WAL) saveState(s *raftpb.HardState) error {
+ if raft.IsEmptyHardState(*s) {
+ return nil
+ }
+ w.state = *s
+ b := pbutil.MustMarshal(s)
+ rec := &walpb.Record{Type: stateType, Data: b}
+ return w.encoder.encode(rec)
+}
+
+func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+	// shortcut: do not call sync
+ if raft.IsEmptyHardState(st) && len(ents) == 0 {
+ return nil
+ }
+
+ mustSync := raft.MustSync(st, w.state, len(ents))
+
+ // TODO(xiangli): no more reference operator
+ for i := range ents {
+ if err := w.saveEntry(&ents[i]); err != nil {
+ return err
+ }
+ }
+ if err := w.saveState(&st); err != nil {
+ return err
+ }
+
+ curOff, err := w.tail().Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+ if curOff < SegmentSizeBytes {
+ if mustSync {
+ return w.sync()
+ }
+ return nil
+ }
+
+ return w.cut()
+}
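+
+// An illustrative raft integration (sketch): persist the hard state and new
+// entries from a raft Ready before applying or sending them:
+//
+// rd := <-node.Ready()
+// if err := w.Save(rd.HardState, rd.Entries); err != nil {
+//     // handle the persistence failure
+// }
+// node.Advance()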
+
+func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
+ b := pbutil.MustMarshal(&e)
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ rec := &walpb.Record{Type: snapshotType, Data: b}
+ if err := w.encoder.encode(rec); err != nil {
+ return err
+ }
+ // update enti only when snapshot is ahead of last index
+ if w.enti < e.Index {
+ w.enti = e.Index
+ }
+ return w.sync()
+}
+
+func (w *WAL) saveCrc(prevCrc uint32) error {
+ return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
+}
+
+func (w *WAL) tail() *fileutil.LockedFile {
+ if len(w.locks) > 0 {
+ return w.locks[len(w.locks)-1]
+ }
+ return nil
+}
+
+func (w *WAL) seq() uint64 {
+ t := w.tail()
+ if t == nil {
+ return 0
+ }
+ seq, _, err := parseWALName(filepath.Base(t.Name()))
+ if err != nil {
+ if w.lg != nil {
+ w.lg.Fatal("failed to parse WAL name", zap.String("name", t.Name()), zap.Error(err))
+ } else {
+ plog.Fatalf("bad wal name %s (%v)", t.Name(), err)
+ }
+ }
+ return seq
+}
+
+func closeAll(rcs ...io.ReadCloser) error {
+ for _, f := range rcs {
+ if err := f.Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.etcd.io/etcd/wal/walpb/record.go b/vendor/go.etcd.io/etcd/wal/walpb/record.go
new file mode 100644
index 000000000000..30a05e0c139c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/wal/walpb/record.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package walpb
+
+import "errors"
+
+var (
+ ErrCRCMismatch = errors.New("walpb: crc mismatch")
+)
+
+func (rec *Record) Validate(crc uint32) error {
+ if rec.Crc == crc {
+ return nil
+ }
+ rec.Reset()
+ return ErrCRCMismatch
+}
diff --git a/vendor/go.etcd.io/etcd/wal/walpb/record.pb.go b/vendor/go.etcd.io/etcd/wal/walpb/record.pb.go
new file mode 100644
index 000000000000..3ce63ddc2eb6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/wal/walpb/record.pb.go
@@ -0,0 +1,504 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: record.proto
+
+/*
+ Package walpb is a generated protocol buffer package.
+
+ It is generated from these files:
+ record.proto
+
+ It has these top-level messages:
+ Record
+ Snapshot
+*/
+package walpb
+
+import (
+ "fmt"
+
+ proto "github.com/golang/protobuf/proto"
+
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+
+ io "io"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Record struct {
+ Type int64 `protobuf:"varint,1,opt,name=type" json:"type"`
+ Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"`
+ Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Record) Reset() { *m = Record{} }
+func (m *Record) String() string { return proto.CompactTextString(m) }
+func (*Record) ProtoMessage() {}
+func (*Record) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{0} }
+
+type Snapshot struct {
+ Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"`
+ Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRecord, []int{1} }
+
+func init() {
+ proto.RegisterType((*Record)(nil), "walpb.Record")
+ proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot")
+}
+func (m *Record) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Record) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRecord(dAtA, i, uint64(m.Type))
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRecord(dAtA, i, uint64(m.Crc))
+ if m.Data != nil {
+ dAtA[i] = 0x1a
+ i++
+ i = encodeVarintRecord(dAtA, i, uint64(len(m.Data)))
+ i += copy(dAtA[i:], m.Data)
+ }
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalTo(dAtA)
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
+ var i int
+ _ = i
+ var l int
+ _ = l
+ dAtA[i] = 0x8
+ i++
+ i = encodeVarintRecord(dAtA, i, uint64(m.Index))
+ dAtA[i] = 0x10
+ i++
+ i = encodeVarintRecord(dAtA, i, uint64(m.Term))
+ if m.XXX_unrecognized != nil {
+ i += copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return i, nil
+}
+
+func encodeVarintRecord(dAtA []byte, offset int, v uint64) int {
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return offset + 1
+}
+func (m *Record) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRecord(uint64(m.Type))
+ n += 1 + sovRecord(uint64(m.Crc))
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovRecord(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Snapshot) Size() (n int) {
+ var l int
+ _ = l
+ n += 1 + sovRecord(uint64(m.Index))
+ n += 1 + sovRecord(uint64(m.Term))
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovRecord(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+func sozRecord(x uint64) (n int) {
+ return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Record) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Record: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= (int64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType)
+ }
+ m.Crc = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Crc |= (uint32(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRecord
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRecord(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRecord
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+ }
+ m.Index = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Index |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Term |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRecord(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthRecord
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipRecord(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ return iNdEx, nil
+ case 1:
+ iNdEx += 8
+ return iNdEx, nil
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ iNdEx += length
+ if length < 0 {
+ return 0, ErrInvalidLengthRecord
+ }
+ return iNdEx, nil
+ case 3:
+ for {
+ var innerWire uint64
+ var start int = iNdEx
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ innerWire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ innerWireType := int(innerWire & 0x7)
+ if innerWireType == 4 {
+ break
+ }
+ next, err := skipRecord(dAtA[start:])
+ if err != nil {
+ return 0, err
+ }
+ iNdEx = start + next
+ }
+ return iNdEx, nil
+ case 4:
+ return iNdEx, nil
+ case 5:
+ iNdEx += 4
+ return iNdEx, nil
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ }
+ panic("unreachable")
+}
+
+var (
+ ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow")
+)
+
+func init() { proto.RegisterFile("record.proto", fileDescriptorRecord) }
+
+var fileDescriptorRecord = []byte{
+ // 186 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce,
+ 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92,
+ 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6,
+ 0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d,
+ 0x66, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0xc0, 0x22, 0x42, 0x62, 0x5c, 0xcc, 0xc9, 0x45,
+ 0xc9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xbc, 0x50, 0x09, 0x90, 0x80, 0x90, 0x10, 0x17, 0x4b, 0x4a,
+ 0x62, 0x49, 0xa2, 0x04, 0xb3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, 0xad, 0xe4, 0xc0, 0xc5, 0x11,
+ 0x9c, 0x97, 0x58, 0x50, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc5, 0xc5, 0x9a, 0x99, 0x97, 0x92, 0x5a,
+ 0x01, 0x36, 0x92, 0x05, 0xaa, 0x13, 0x22, 0x04, 0xb6, 0x2d, 0xb5, 0x28, 0x17, 0x6c, 0x28, 0x0b,
+ 0xdc, 0xb6, 0xd4, 0xa2, 0x5c, 0x27, 0x91, 0x13, 0x0f, 0xe5, 0x18, 0x4e, 0x3c, 0x92, 0x63, 0xbc,
+ 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00, 0x00,
+ 0xff, 0xff, 0x7f, 0x5e, 0x5c, 0x46, 0xd3, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/go.etcd.io/etcd/wal/walpb/record.proto b/vendor/go.etcd.io/etcd/wal/walpb/record.proto
new file mode 100644
index 000000000000..b694cb2338aa
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/wal/walpb/record.proto
@@ -0,0 +1,20 @@
+syntax = "proto2";
+package walpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message Record {
+ optional int64 type = 1 [(gogoproto.nullable) = false];
+ optional uint32 crc = 2 [(gogoproto.nullable) = false];
+ optional bytes data = 3;
+}
+
+message Snapshot {
+ optional uint64 index = 1 [(gogoproto.nullable) = false];
+ optional uint64 term = 2 [(gogoproto.nullable) = false];
+}
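
The gogoproto options above generate Marshal/Unmarshal methods directly on the walpb types, and the nullable = false annotations make every field a plain value rather than a pointer. A minimal round-trip sketch under those assumptions:

    package main

    import (
        "fmt"

        "go.etcd.io/etcd/wal/walpb"
    )

    func main() {
        // Non-nullable fields are plain values, so the struct literal needs no pointers.
        rec := walpb.Record{Type: 1, Crc: 0xdeadbeef, Data: []byte("payload")}

        b, err := rec.Marshal() // generated by marshaler_all = true
        if err != nil {
            panic(err)
        }

        var out walpb.Record
        if err := out.Unmarshal(b); err != nil { // generated by unmarshaler_all = true
            panic(err)
        }
        fmt.Println(out.Type, out.Crc, string(out.Data))
    }
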
diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go
new file mode 100644
index 000000000000..fc3116090818
--- /dev/null
+++ b/vendor/golang.org/x/crypto/bcrypt/base64.go
@@ -0,0 +1,35 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bcrypt
+
+import "encoding/base64"
+
+const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+
+var bcEncoding = base64.NewEncoding(alphabet)
+
+func base64Encode(src []byte) []byte {
+ n := bcEncoding.EncodedLen(len(src))
+ dst := make([]byte, n)
+ bcEncoding.Encode(dst, src)
+ for dst[n-1] == '=' {
+ n--
+ }
+ return dst[:n]
+}
+
+func base64Decode(src []byte) ([]byte, error) {
+ numOfEquals := 4 - (len(src) % 4)
+ for i := 0; i < numOfEquals; i++ {
+ src = append(src, '=')
+ }
+
+ dst := make([]byte, bcEncoding.DecodedLen(len(src)))
+ n, err := bcEncoding.Decode(dst, src)
+ if err != nil {
+ return nil, err
+ }
+ return dst[:n], nil
+}
diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
new file mode 100644
index 000000000000..aeb73f81a14c
--- /dev/null
+++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
@@ -0,0 +1,295 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
+// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
+package bcrypt // import "golang.org/x/crypto/bcrypt"
+
+// The code is a port of Provos and Mazières's C implementation.
+import (
+ "crypto/rand"
+ "crypto/subtle"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+
+ "golang.org/x/crypto/blowfish"
+)
+
+const (
+ MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword
+ MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
+ DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
+)
+
+// The error returned from CompareHashAndPassword when a password and hash do
+// not match.
+var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
+
+// The error returned from CompareHashAndPassword when a hash is too short to
+// be a bcrypt hash.
+var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
+
+// The error returned from CompareHashAndPassword when a hash was created with
+// a bcrypt algorithm newer than this implementation.
+type HashVersionTooNewError byte
+
+func (hv HashVersionTooNewError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
+}
+
+// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
+type InvalidHashPrefixError byte
+
+func (ih InvalidHashPrefixError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
+}
+
+type InvalidCostError int
+
+func (ic InvalidCostError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
+}
+
+const (
+ majorVersion = '2'
+ minorVersion = 'a'
+ maxSaltSize = 16
+ maxCryptedHashSize = 23
+ encodedSaltSize = 22
+ encodedHashSize = 31
+ minHashSize = 59
+)
+
+// magicCipherData is an IV for the 64 Blowfish encryption calls in
+// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
+var magicCipherData = []byte{
+ 0x4f, 0x72, 0x70, 0x68,
+ 0x65, 0x61, 0x6e, 0x42,
+ 0x65, 0x68, 0x6f, 0x6c,
+ 0x64, 0x65, 0x72, 0x53,
+ 0x63, 0x72, 0x79, 0x44,
+ 0x6f, 0x75, 0x62, 0x74,
+}
+
+type hashed struct {
+ hash []byte
+ salt []byte
+ cost int // allowed range is MinCost to MaxCost
+ major byte
+ minor byte
+}
+
+// GenerateFromPassword returns the bcrypt hash of the password at the given
+// cost. If the cost given is less than MinCost, the cost will be set to
+// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
+// to compare the returned hashed password with its cleartext version.
+func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
+ p, err := newFromPassword(password, cost)
+ if err != nil {
+ return nil, err
+ }
+ return p.Hash(), nil
+}
+
+// CompareHashAndPassword compares a bcrypt hashed password with its possible
+// plaintext equivalent. Returns nil on success, or an error on failure.
+func CompareHashAndPassword(hashedPassword, password []byte) error {
+ p, err := newFromHash(hashedPassword)
+ if err != nil {
+ return err
+ }
+
+ otherHash, err := bcrypt(password, p.cost, p.salt)
+ if err != nil {
+ return err
+ }
+
+ otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
+ if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
+ return nil
+ }
+
+ return ErrMismatchedHashAndPassword
+}
+
+// Cost returns the hashing cost used to create the given hashed
+// password. When, in the future, the hashing cost of a password system needs
+// to be increased in order to adjust for greater computational power, this
+// function allows one to establish which passwords need to be updated.
+func Cost(hashedPassword []byte) (int, error) {
+ p, err := newFromHash(hashedPassword)
+ if err != nil {
+ return 0, err
+ }
+ return p.cost, nil
+}
+
+func newFromPassword(password []byte, cost int) (*hashed, error) {
+ if cost < MinCost {
+ cost = DefaultCost
+ }
+ p := new(hashed)
+ p.major = majorVersion
+ p.minor = minorVersion
+
+ err := checkCost(cost)
+ if err != nil {
+ return nil, err
+ }
+ p.cost = cost
+
+ unencodedSalt := make([]byte, maxSaltSize)
+ _, err = io.ReadFull(rand.Reader, unencodedSalt)
+ if err != nil {
+ return nil, err
+ }
+
+ p.salt = base64Encode(unencodedSalt)
+ hash, err := bcrypt(password, p.cost, p.salt)
+ if err != nil {
+ return nil, err
+ }
+ p.hash = hash
+ return p, err
+}
+
+func newFromHash(hashedSecret []byte) (*hashed, error) {
+ if len(hashedSecret) < minHashSize {
+ return nil, ErrHashTooShort
+ }
+ p := new(hashed)
+ n, err := p.decodeVersion(hashedSecret)
+ if err != nil {
+ return nil, err
+ }
+ hashedSecret = hashedSecret[n:]
+ n, err = p.decodeCost(hashedSecret)
+ if err != nil {
+ return nil, err
+ }
+ hashedSecret = hashedSecret[n:]
+
+ // The "+2" is here because we'll have to append at most 2 '=' to the salt
+ // when base64 decoding it in expensiveBlowfishSetup().
+ p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
+ copy(p.salt, hashedSecret[:encodedSaltSize])
+
+ hashedSecret = hashedSecret[encodedSaltSize:]
+ p.hash = make([]byte, len(hashedSecret))
+ copy(p.hash, hashedSecret)
+
+ return p, nil
+}
+
+func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
+ cipherData := make([]byte, len(magicCipherData))
+ copy(cipherData, magicCipherData)
+
+ c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
+ if err != nil {
+ return nil, err
+ }
+
+ for i := 0; i < 24; i += 8 {
+ for j := 0; j < 64; j++ {
+ c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
+ }
+ }
+
+ // Bug compatibility with C bcrypt implementations. We only encode 23 of
+ // the 24 bytes encrypted.
+ hsh := base64Encode(cipherData[:maxCryptedHashSize])
+ return hsh, nil
+}
+
+func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
+ csalt, err := base64Decode(salt)
+ if err != nil {
+ return nil, err
+ }
+
+ // Bug compatibility with C bcrypt implementations. They use the trailing
+ // NULL in the key string during expansion.
+ // We copy the key to prevent changing the underlying array.
+ ckey := append(key[:len(key):len(key)], 0)
+
+ c, err := blowfish.NewSaltedCipher(ckey, csalt)
+ if err != nil {
+ return nil, err
+ }
+
+ var i, rounds uint64
+ rounds = 1 << cost
+ for i = 0; i < rounds; i++ {
+ blowfish.ExpandKey(ckey, c)
+ blowfish.ExpandKey(csalt, c)
+ }
+
+ return c, nil
+}
+
+func (p *hashed) Hash() []byte {
+ arr := make([]byte, 60)
+ arr[0] = '$'
+ arr[1] = p.major
+ n := 2
+ if p.minor != 0 {
+ arr[2] = p.minor
+ n = 3
+ }
+ arr[n] = '$'
+ n++
+ copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
+ n += 2
+ arr[n] = '$'
+ n++
+ copy(arr[n:], p.salt)
+ n += encodedSaltSize
+ copy(arr[n:], p.hash)
+ n += encodedHashSize
+ return arr[:n]
+}
+
+func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
+ if sbytes[0] != '$' {
+ return -1, InvalidHashPrefixError(sbytes[0])
+ }
+ if sbytes[1] > majorVersion {
+ return -1, HashVersionTooNewError(sbytes[1])
+ }
+ p.major = sbytes[1]
+ n := 3
+ if sbytes[2] != '$' {
+ p.minor = sbytes[2]
+ n++
+ }
+ return n, nil
+}
+
+// sbytes should begin where decodeVersion left off.
+func (p *hashed) decodeCost(sbytes []byte) (int, error) {
+ cost, err := strconv.Atoi(string(sbytes[0:2]))
+ if err != nil {
+ return -1, err
+ }
+ err = checkCost(cost)
+ if err != nil {
+ return -1, err
+ }
+ p.cost = cost
+ return 3, nil
+}
+
+func (p *hashed) String() string {
+ return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
+}
+
+func checkCost(cost int) error {
+ if cost < MinCost || cost > MaxCost {
+ return InvalidCostError(cost)
+ }
+ return nil
+}
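
The exported surface of this vendored package is small: GenerateFromPassword, CompareHashAndPassword, and Cost. A short sketch of the hash/verify round trip (plain x/crypto/bcrypt usage, nothing k3s-specific):

    package main

    import (
        "fmt"

        "golang.org/x/crypto/bcrypt"
    )

    func main() {
        // Costs below MinCost are silently promoted to DefaultCost (10).
        hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
        if err != nil {
            panic(err)
        }

        // Returns nil on a match; the comparison is constant-time via crypto/subtle.
        if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err == nil {
            fmt.Println("password ok")
        }

        // Cost recovers the work factor from an existing hash, useful when
        // deciding whether stored hashes should be re-hashed at a higher cost.
        cost, _ := bcrypt.Cost(hash)
        fmt.Println("cost:", cost)
    }
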
diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
new file mode 100644
index 000000000000..5b6c587a9652
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
@@ -0,0 +1,146 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/api/httpbody.proto
+
+package httpbody
+
+import (
+ fmt "fmt"
+ math "math"
+
+ proto "github.com/golang/protobuf/proto"
+ any "github.com/golang/protobuf/ptypes/any"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Message that represents an arbitrary HTTP body. It should only be used for
+// payload formats that can't be represented as JSON, such as raw binary or
+// an HTML page.
+//
+//
+// This message can be used both in streaming and non-streaming API methods in
+// the request as well as the response.
+//
+// It can be used as a top-level request field, which is convenient if one
+// wants to extract parameters from either the URL or HTTP template into the
+// request fields and also want access to the raw HTTP body.
+//
+// Example:
+//
+// message GetResourceRequest {
+// // A unique request id.
+// string request_id = 1;
+//
+// // The raw HTTP body is bound to this field.
+// google.api.HttpBody http_body = 2;
+// }
+//
+// service ResourceService {
+// rpc GetResource(GetResourceRequest) returns (google.api.HttpBody);
+// rpc UpdateResource(google.api.HttpBody) returns
+// (google.protobuf.Empty);
+// }
+//
+// Example with streaming methods:
+//
+// service CaldavService {
+// rpc GetCalendar(stream google.api.HttpBody)
+// returns (stream google.api.HttpBody);
+// rpc UpdateCalendar(stream google.api.HttpBody)
+// returns (stream google.api.HttpBody);
+// }
+//
+// Use of this type only changes how the request and response bodies are
+// handled, all other features will continue to work unchanged.
+type HttpBody struct {
+ // The HTTP Content-Type header value specifying the content type of the body.
+ ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
+ // The HTTP request/response body as raw binary.
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ // Application specific response metadata. Must be set in the first response
+ // for streaming APIs.
+ Extensions []*any.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *HttpBody) Reset() { *m = HttpBody{} }
+func (m *HttpBody) String() string { return proto.CompactTextString(m) }
+func (*HttpBody) ProtoMessage() {}
+func (*HttpBody) Descriptor() ([]byte, []int) {
+ return fileDescriptor_09ea2ecaa32a0070, []int{0}
+}
+
+func (m *HttpBody) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_HttpBody.Unmarshal(m, b)
+}
+func (m *HttpBody) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_HttpBody.Marshal(b, m, deterministic)
+}
+func (m *HttpBody) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HttpBody.Merge(m, src)
+}
+func (m *HttpBody) XXX_Size() int {
+ return xxx_messageInfo_HttpBody.Size(m)
+}
+func (m *HttpBody) XXX_DiscardUnknown() {
+ xxx_messageInfo_HttpBody.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HttpBody proto.InternalMessageInfo
+
+func (m *HttpBody) GetContentType() string {
+ if m != nil {
+ return m.ContentType
+ }
+ return ""
+}
+
+func (m *HttpBody) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *HttpBody) GetExtensions() []*any.Any {
+ if m != nil {
+ return m.Extensions
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*HttpBody)(nil), "google.api.HttpBody")
+}
+
+func init() { proto.RegisterFile("google/api/httpbody.proto", fileDescriptor_09ea2ecaa32a0070) }
+
+var fileDescriptor_09ea2ecaa32a0070 = []byte{
+ // 229 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0x31, 0x4f, 0xc3, 0x30,
+ 0x10, 0x85, 0xe5, 0xb6, 0x42, 0x70, 0x2d, 0x0c, 0x16, 0x43, 0x60, 0x0a, 0x4c, 0x99, 0x6c, 0x09,
+ 0xd8, 0x3a, 0x35, 0x0b, 0xb0, 0x45, 0x11, 0x13, 0x0b, 0x72, 0x1a, 0xe3, 0x46, 0x2a, 0x77, 0xa7,
+ 0xe6, 0x10, 0xf8, 0xef, 0xf0, 0x2b, 0x19, 0x11, 0x69, 0x2c, 0xe8, 0xf6, 0xe4, 0xef, 0x3d, 0xbf,
+ 0x77, 0x70, 0x11, 0x88, 0xc2, 0xd6, 0x5b, 0xc7, 0x9d, 0xdd, 0x88, 0x70, 0x43, 0x6d, 0x34, 0xbc,
+ 0x23, 0x21, 0x0d, 0x7b, 0x64, 0x1c, 0x77, 0x97, 0xc9, 0x36, 0x90, 0xe6, 0xfd, 0xd5, 0x3a, 0x1c,
+ 0x6d, 0xd7, 0x1f, 0x70, 0xfc, 0x20, 0xc2, 0x25, 0xb5, 0x51, 0x5f, 0xc1, 0x62, 0x4d, 0x28, 0x1e,
+ 0xe5, 0x45, 0x22, 0xfb, 0x4c, 0xe5, 0xaa, 0x38, 0xa9, 0xe7, 0xe3, 0xdb, 0x53, 0x64, 0xaf, 0x35,
+ 0xcc, 0x5a, 0x27, 0x2e, 0x9b, 0xe4, 0xaa, 0x58, 0xd4, 0x83, 0xd6, 0x77, 0x00, 0xfe, 0x53, 0x3c,
+ 0xf6, 0x1d, 0x61, 0x9f, 0x4d, 0xf3, 0x69, 0x31, 0xbf, 0x39, 0x37, 0x63, 0x7d, 0xaa, 0x34, 0x2b,
+ 0x8c, 0xf5, 0x3f, 0x5f, 0xb9, 0x81, 0xb3, 0x35, 0xbd, 0x99, 0xbf, 0x95, 0xe5, 0x69, 0x1a, 0x52,
+ 0xfd, 0x66, 0x2a, 0xf5, 0xbc, 0x1c, 0x61, 0xa0, 0xad, 0xc3, 0x60, 0x68, 0x17, 0x6c, 0xf0, 0x38,
+ 0xfc, 0x68, 0xf7, 0xc8, 0x71, 0xd7, 0x1f, 0x1c, 0xbf, 0x4c, 0xe2, 0x5b, 0xa9, 0xaf, 0xc9, 0xec,
+ 0x7e, 0x55, 0x3d, 0x36, 0x47, 0x43, 0xe2, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0x78, 0xb9, 0x16,
+ 0x2b, 0x2d, 0x01, 0x00, 0x00,
+}
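
As the doc comment explains, HttpBody exists so an RPC can carry raw bytes plus a content type instead of forcing the payload into JSON. On the Go side it is an ordinary struct; a minimal sketch:

    package main

    import (
        "fmt"

        "google.golang.org/genproto/googleapis/api/httpbody"
    )

    func main() {
        // Raw binary response with an explicit Content-Type.
        body := &httpbody.HttpBody{
            ContentType: "image/png",
            Data:        []byte{0x89, 'P', 'N', 'G'},
        }
        fmt.Println(body.GetContentType(), len(body.GetData()))
    }
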
diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
new file mode 100644
index 000000000000..a0889f0c7a63
--- /dev/null
+++ b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
@@ -0,0 +1,282 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/field_mask.proto
+
+package field_mask
+
+import (
+ fmt "fmt"
+ math "math"
+
+ proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// `FieldMask` represents a set of symbolic field paths, for example:
+//
+// paths: "f.a"
+// paths: "f.b.d"
+//
+// Here `f` represents a field in some root message, `a` and `b`
+// fields in the message found in `f`, and `d` a field found in the
+// message in `f.b`.
+//
+// Field masks are used to specify a subset of fields that should be
+// returned by a get operation or modified by an update operation.
+// Field masks also have a custom JSON encoding (see below).
+//
+// # Field Masks in Projections
+//
+// When used in the context of a projection, a response message or
+// sub-message is filtered by the API to only contain those fields as
+// specified in the mask. For example, if the mask in the previous
+// example is applied to a response message as follows:
+//
+// f {
+// a : 22
+// b {
+// d : 1
+// x : 2
+// }
+// y : 13
+// }
+// z: 8
+//
+// The result will not contain specific values for fields x,y and z
+// (their value will be set to the default, and omitted in proto text
+// output):
+//
+//
+// f {
+// a : 22
+// b {
+// d : 1
+// }
+// }
+//
+// A repeated field is not allowed except at the last position of a
+// paths string.
+//
+// If a FieldMask object is not present in a get operation, the
+// operation applies to all fields (as if a FieldMask of all fields
+// had been specified).
+//
+// Note that a field mask does not necessarily apply to the
+// top-level response message. In case of a REST get operation, the
+// field mask applies directly to the response, but in case of a REST
+// list operation, the mask instead applies to each individual message
+// in the returned resource list. In case of a REST custom method,
+// other definitions may be used. Where the mask applies will be
+// clearly documented together with its declaration in the API. In
+// any case, the effect on the returned resource/resources is required
+// behavior for APIs.
+//
+// # Field Masks in Update Operations
+//
+// A field mask in update operations specifies which fields of the
+// targeted resource are going to be updated. The API is required
+// to only change the values of the fields as specified in the mask
+// and leave the others untouched. If a resource is passed in to
+// describe the updated values, the API ignores the values of all
+// fields not covered by the mask.
+//
+// If a repeated field is specified for an update operation, new values will
+// be appended to the existing repeated field in the target resource. Note that
+// a repeated field is only allowed in the last position of a `paths` string.
+//
+// If a sub-message is specified in the last position of the field mask for an
+// update operation, then new value will be merged into the existing sub-message
+// in the target resource.
+//
+// For example, given the target message:
+//
+// f {
+// b {
+// d: 1
+// x: 2
+// }
+// c: [1]
+// }
+//
+// And an update message:
+//
+// f {
+// b {
+// d: 10
+// }
+// c: [2]
+// }
+//
+// then if the field mask is:
+//
+// paths: ["f.b", "f.c"]
+//
+// then the result will be:
+//
+// f {
+// b {
+// d: 10
+// x: 2
+// }
+// c: [1, 2]
+// }
+//
+// An implementation may provide options to override this default behavior for
+// repeated and message fields.
+//
+// In order to reset a field's value to the default, the field must
+// be in the mask and set to the default value in the provided resource.
+// Hence, in order to reset all fields of a resource, provide a default
+// instance of the resource and set all fields in the mask, or do
+// not provide a mask as described below.
+//
+// If a field mask is not present on update, the operation applies to
+// all fields (as if a field mask of all fields has been specified).
+// Note that in the presence of schema evolution, this may mean that
+// fields the client does not know and has therefore not filled into
+// the request will be reset to their default. If this is unwanted
+// behavior, a specific service may require a client to always specify
+// a field mask, producing an error if not.
+//
+// As with get operations, the location of the resource which
+// describes the updated values in the request message depends on the
+// operation kind. In any case, the effect of the field mask is
+// required to be honored by the API.
+//
+// ## Considerations for HTTP REST
+//
+// The HTTP kind of an update operation which uses a field mask must
+// be set to PATCH instead of PUT in order to satisfy HTTP semantics
+// (PUT must only be used for full updates).
+//
+// # JSON Encoding of Field Masks
+//
+// In JSON, a field mask is encoded as a single string where paths are
+// separated by a comma. Fields name in each path are converted
+// to/from lower-camel naming conventions.
+//
+// As an example, consider the following message declarations:
+//
+// message Profile {
+// User user = 1;
+// Photo photo = 2;
+// }
+// message User {
+// string display_name = 1;
+// string address = 2;
+// }
+//
+// In proto a field mask for `Profile` may look as such:
+//
+// mask {
+// paths: "user.display_name"
+// paths: "photo"
+// }
+//
+// In JSON, the same mask is represented as below:
+//
+// {
+// mask: "user.displayName,photo"
+// }
+//
+// # Field Masks and Oneof Fields
+//
+// Field masks treat fields in oneofs just as regular fields. Consider the
+// following message:
+//
+// message SampleMessage {
+// oneof test_oneof {
+// string name = 4;
+// SubMessage sub_message = 9;
+// }
+// }
+//
+// The field mask can be:
+//
+// mask {
+// paths: "name"
+// }
+//
+// Or:
+//
+// mask {
+// paths: "sub_message"
+// }
+//
+// Note that oneof type names ("test_oneof" in this case) cannot be used in
+// paths.
+//
+// ## Field Mask Verification
+//
+// The implementation of any API method which has a FieldMask type field in the
+// request should verify the included field paths, and return an
+// `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
+type FieldMask struct {
+ // The set of field mask paths.
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FieldMask) Reset() { *m = FieldMask{} }
+func (m *FieldMask) String() string { return proto.CompactTextString(m) }
+func (*FieldMask) ProtoMessage() {}
+func (*FieldMask) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5158202634f0da48, []int{0}
+}
+
+func (m *FieldMask) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_FieldMask.Unmarshal(m, b)
+}
+func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic)
+}
+func (m *FieldMask) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_FieldMask.Merge(m, src)
+}
+func (m *FieldMask) XXX_Size() int {
+ return xxx_messageInfo_FieldMask.Size(m)
+}
+func (m *FieldMask) XXX_DiscardUnknown() {
+ xxx_messageInfo_FieldMask.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldMask proto.InternalMessageInfo
+
+func (m *FieldMask) GetPaths() []string {
+ if m != nil {
+ return m.Paths
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask")
+}
+
+func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) }
+
+var fileDescriptor_5158202634f0da48 = []byte{
+ // 175 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd,
+ 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54,
+ 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16,
+ 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x3d, 0x8c,
+ 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x5a, 0x9d, 0xf8, 0xe0, 0x1a, 0x03, 0x40, 0x42, 0x01,
+ 0x8c, 0x51, 0x96, 0x50, 0x25, 0xe9, 0xf9, 0x39, 0x89, 0x79, 0xe9, 0x7a, 0xf9, 0x45, 0xe9, 0xfa,
+ 0xe9, 0xa9, 0x79, 0x60, 0x0d, 0xd8, 0xdc, 0x64, 0x8d, 0x60, 0xfe, 0x60, 0x64, 0x5c, 0xc4, 0xc4,
+ 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x48, 0x00, 0x54, 0x83, 0x5e, 0x78, 0x6a,
+ 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x24,
+ 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xda, 0xb7, 0xa8, 0xed, 0x00, 0x00, 0x00,
+}
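
In Go the mask is just a list of paths; the Profile example from the doc comment translates to the sketch below (the comma-joined form approximates the JSON encoding, which additionally lowerCamelCases each path segment):

    package main

    import (
        "fmt"
        "strings"

        "google.golang.org/genproto/protobuf/field_mask"
    )

    func main() {
        // Equivalent to the proto text example:
        //   paths: "user.display_name"
        //   paths: "photo"
        mask := &field_mask.FieldMask{
            Paths: []string{"user.display_name", "photo"},
        }
        fmt.Println(strings.Join(mask.GetPaths(), ","))
    }
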
diff --git a/vendor/gopkg.in/robfig/cron.v2/.travis.yml b/vendor/gopkg.in/robfig/cron.v2/.travis.yml
deleted file mode 100644
index 4f2ee4d97338..000000000000
--- a/vendor/gopkg.in/robfig/cron.v2/.travis.yml
+++ /dev/null
@@ -1 +0,0 @@
-language: go
diff --git a/vendor/gopkg.in/robfig/cron.v2/LICENSE b/vendor/gopkg.in/robfig/cron.v2/LICENSE
deleted file mode 100644
index 3a0f627ffeb5..000000000000
--- a/vendor/gopkg.in/robfig/cron.v2/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-Copyright (C) 2012 Rob Figueiredo
-All Rights Reserved.
-
-MIT LICENSE
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/gopkg.in/robfig/cron.v2/README.md b/vendor/gopkg.in/robfig/cron.v2/README.md
deleted file mode 100644
index a9db98c35f48..000000000000
--- a/vendor/gopkg.in/robfig/cron.v2/README.md
+++ /dev/null
@@ -1 +0,0 @@
-[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron)
diff --git a/vendor/gopkg.in/robfig/cron.v2/constantdelay.go b/vendor/gopkg.in/robfig/cron.v2/constantdelay.go
deleted file mode 100644
index cd6e7b1be91a..000000000000
--- a/vendor/gopkg.in/robfig/cron.v2/constantdelay.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package cron
-
-import "time"
-
-// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
-// It does not support jobs more frequent than once a second.
-type ConstantDelaySchedule struct {
- Delay time.Duration
-}
-
-// Every returns a crontab Schedule that activates once every duration.
-// Delays of less than a second are not supported (will round up to 1 second).
-// Any fields less than a Second are truncated.
-func Every(duration time.Duration) ConstantDelaySchedule {
- if duration < time.Second {
- duration = time.Second
- }
- return ConstantDelaySchedule{
- Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
- }
-}
-
-// Next returns the next time this should be run.
-// This rounds so that the next activation time will be on the second.
-func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
- return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
-}
diff --git a/vendor/gopkg.in/robfig/cron.v2/cron.go b/vendor/gopkg.in/robfig/cron.v2/cron.go
deleted file mode 100644
index 62d2d839e02f..000000000000
--- a/vendor/gopkg.in/robfig/cron.v2/cron.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Package cron implements a cron spec parser and runner.
-package cron // import "gopkg.in/robfig/cron.v2"
-
-import (
- "sort"
- "time"
-)
-
-// Cron keeps track of any number of entries, invoking the associated func as
-// specified by the schedule. It may be started, stopped, and the entries may
-// be inspected while running.
-type Cron struct {
- entries []*Entry
- stop chan struct{}
- add chan *Entry
- remove chan EntryID
- snapshot chan []Entry
- running bool
- nextID EntryID
-}
-
-// Job is an interface for submitted cron jobs.
-type Job interface {
- Run()
-}
-
-// Schedule describes a job's duty cycle.
-type Schedule interface {
- // Next returns the next activation time, later than the given time.
- // Next is invoked initially, and then each time the job is run.
- Next(time.Time) time.Time
-}
-
-// EntryID identifies an entry within a Cron instance
-type EntryID int
-
-// Entry consists of a schedule and the func to execute on that schedule.
-type Entry struct {
- // ID is the cron-assigned ID of this entry, which may be used to look up a
- // snapshot or remove it.
- ID EntryID
-
- // Schedule on which this job should be run.
- Schedule Schedule
-
- // Next time the job will run, or the zero time if Cron has not been
- // started or this entry's schedule is unsatisfiable
- Next time.Time
-
- // Prev is the last time this job was run, or the zero time if never.
- Prev time.Time
-
- // Job is the thing to run when the Schedule is activated.
- Job Job
-}
-
-// Valid returns true if this is not the zero entry.
-func (e Entry) Valid() bool { return e.ID != 0 }
-
-// byTime is a wrapper for sorting the entry array by time
-// (with zero time at the end).
-type byTime []*Entry
-
-func (s byTime) Len() int { return len(s) }
-func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s byTime) Less(i, j int) bool {
- // Two zero times should return false.
- // Otherwise, zero is "greater" than any other time.
- // (To sort it at the end of the list.)
- if s[i].Next.IsZero() {
- return false
- }
- if s[j].Next.IsZero() {
- return true
- }
- return s[i].Next.Before(s[j].Next)
-}
-
-// New returns a new Cron job runner.
-func New() *Cron {
- return &Cron{
- entries: nil,
- add: make(chan *Entry),
- stop: make(chan struct{}),
- snapshot: make(chan []Entry),
- remove: make(chan EntryID),
- running: false,
- }
-}
-
-// FuncJob is a wrapper that turns a func() into a cron.Job
-type FuncJob func()
-
-func (f FuncJob) Run() { f() }
-
-// AddFunc adds a func to the Cron to be run on the given schedule.
-func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) {
- return c.AddJob(spec, FuncJob(cmd))
-}
-
-// AddJob adds a Job to the Cron to be run on the given schedule.
-func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) {
- schedule, err := Parse(spec)
- if err != nil {
- return 0, err
- }
- return c.Schedule(schedule, cmd), nil
-}
-
-// Schedule adds a Job to the Cron to be run on the given schedule.
-func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID {
- c.nextID++
- entry := &Entry{
- ID: c.nextID,
- Schedule: schedule,
- Job: cmd,
- }
- if !c.running {
- c.entries = append(c.entries, entry)
- } else {
- c.add <- entry
- }
- return entry.ID
-}
-
-// Entries returns a snapshot of the cron entries.
-func (c *Cron) Entries() []Entry {
- if c.running {
- c.snapshot <- nil
- return <-c.snapshot
- }
- return c.entrySnapshot()
-}
-
-// Entry returns a snapshot of the given entry, or nil if it couldn't be found.
-func (c *Cron) Entry(id EntryID) Entry {
- for _, entry := range c.Entries() {
- if id == entry.ID {
- return entry
- }
- }
- return Entry{}
-}
-
-// Remove an entry from being run in the future.
-func (c *Cron) Remove(id EntryID) {
- if c.running {
- c.remove <- id
- } else {
- c.removeEntry(id)
- }
-}
-
-// Start the cron scheduler in its own go-routine.
-func (c *Cron) Start() {
- c.running = true
- go c.run()
-}
-
-// run the scheduler.. this is private just due to the need to synchronize
-// access to the 'running' state variable.
-func (c *Cron) run() {
- // Figure out the next activation times for each entry.
- now := time.Now().Local()
- for _, entry := range c.entries {
- entry.Next = entry.Schedule.Next(now)
- }
-
- for {
- // Determine the next entry to run.
- sort.Sort(byTime(c.entries))
-
- var effective time.Time
- if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
- // If there are no entries yet, just sleep - it still handles new entries
- // and stop requests.
- effective = now.AddDate(10, 0, 0)
- } else {
- effective = c.entries[0].Next
- }
-
- select {
- case now = <-time.After(effective.Sub(now)):
- // Run every entry whose next time was this effective time.
- for _, e := range c.entries {
- if e.Next != effective {
- break
- }
- go e.Job.Run()
- e.Prev = e.Next
- e.Next = e.Schedule.Next(effective)
- }
- continue
-
- case newEntry := <-c.add:
- c.entries = append(c.entries, newEntry)
- newEntry.Next = newEntry.Schedule.Next(now)
-
- case <-c.snapshot:
- c.snapshot <- c.entrySnapshot()
-
- case id := <-c.remove:
- c.removeEntry(id)
-
- case <-c.stop:
- return
- }
-
- now = time.Now().Local()
- }
-}
-
-// Stop the cron scheduler.
-func (c *Cron) Stop() {
- c.stop <- struct{}{}
- c.running = false
-}
-
-// entrySnapshot returns a copy of the current cron entry list.
-func (c *Cron) entrySnapshot() []Entry {
- var entries = make([]Entry, len(c.entries))
- for i, e := range c.entries {
- entries[i] = *e
- }
- return entries
-}
-
-func (c *Cron) removeEntry(id EntryID) {
- var entries []*Entry
- for _, e := range c.entries {
- if e.ID != id {
- entries = append(entries, e)
- }
- }
- c.entries = entries
-}
diff --git a/vendor/gopkg.in/robfig/cron.v2/doc.go b/vendor/gopkg.in/robfig/cron.v2/doc.go
deleted file mode 100644
index 31cd74a62ebb..000000000000
--- a/vendor/gopkg.in/robfig/cron.v2/doc.go
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
-Package cron implements a cron spec parser and job runner.
-
-Usage
-
-Callers may register Funcs to be invoked on a given schedule. Cron will run
-them in their own goroutines.
-
- c := cron.New()
- c.AddFunc("0 30 * * * *", func() { fmt.Println("Every hour on the half hour") })
- c.AddFunc("TZ=Asia/Tokyo 30 04 * * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") })
- c.AddFunc("@hourly", func() { fmt.Println("Every hour") })
- c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty") })
- c.Start()
- ..
- // Funcs are invoked in their own goroutine, asynchronously.
- ...
- // Funcs may also be added to a running Cron
- c.AddFunc("@daily", func() { fmt.Println("Every day") })
- ..
- // Inspect the cron job entries' next and previous run times.
- inspect(c.Entries())
- ..
- c.Stop() // Stop the scheduler (does not stop any jobs already running).
-
-CRON Expression Format
-
-A cron expression represents a set of times, using 6 space-separated fields.
-
- Field name | Mandatory? | Allowed values | Allowed special characters
- ---------- | ---------- | -------------- | --------------------------
- Seconds | No | 0-59 | * / , -
- Minutes | Yes | 0-59 | * / , -
- Hours | Yes | 0-23 | * / , -
- Day of month | Yes | 1-31 | * / , - ?
- Month | Yes | 1-12 or JAN-DEC | * / , -
- Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
-
-Note: Month and Day-of-week field values are case insensitive. "SUN", "Sun",
-and "sun" are equally accepted.
-
-Special Characters
-
-Asterisk ( * )
-
-The asterisk indicates that the cron expression will match for all values of the
-field; e.g., using an asterisk in the 5th field (month) would indicate every
-month.
-
-Slash ( / )
-
-Slashes are used to describe increments of ranges. For example 3-59/15 in the
-1st field (minutes) would indicate the 3rd minute of the hour and every 15
-minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
-that is, an increment over the largest possible range of the field. The form
-"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
-increment until the end of that specific range. It does not wrap around.
-
-Comma ( , )
-
-Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
-the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
-
-Hyphen ( - )
-
-Hyphens are used to define ranges. For example, 9-17 would indicate every
-hour between 9am and 5pm inclusive.
-
-Question mark ( ? )
-
-Question mark may be used instead of '*' for leaving either day-of-month or
-day-of-week blank.
-
-Predefined schedules
-
-You may use one of several pre-defined schedules in place of a cron expression.
-
- Entry | Description | Equivalent To
- ----- | ----------- | -------------
- @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 0 1 1 *
- @monthly | Run once a month, midnight, first of month | 0 0 0 1 * *
- @weekly | Run once a week, midnight on Sunday | 0 0 0 * * 0
- @daily (or @midnight) | Run once a day, midnight | 0 0 0 * * *
- @hourly | Run once an hour, beginning of hour | 0 0 * * * *
-
-Intervals
-
-You may also schedule a job to execute at fixed intervals. This is supported by
-formatting the cron spec like this:
-
- @every <duration>
-
-where "duration" is a string accepted by time.ParseDuration
-(http://golang.org/pkg/time/#ParseDuration).
-
-For example, "@every 1h30m10s" would indicate a schedule that activates every
-1 hour, 30 minutes, 10 seconds.
-
-Note: The interval does not take the job runtime into account. For example,
-if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
-it will have only 2 minutes of idle time between each run.
-
-Time zones
-
-By default, all interpretation and scheduling is done in the machine's local
-time zone (as provided by the Go time package http://www.golang.org/pkg/time).
-The time zone may be overridden by providing an additional space-separated field
-at the beginning of the cron spec, of the form "TZ=Asia/Tokyo"
-
-Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
-not be run!
-
-Thread safety
-
-Since the Cron service runs concurrently with the calling code, some amount of
-care must be taken to ensure proper synchronization.
-
-All cron methods are designed to be correctly synchronized as long as the caller
-ensures that invocations have a clear happens-before ordering between them.
-
-Implementation
-
-Cron entries are stored in an array, sorted by their next activation time. Cron
-sleeps until the next job is due to be run.
-
-Upon waking:
- - it runs each entry that is active on that second
- - it calculates the next run times for the jobs that were run
- - it re-sorts the array of entries by next activation time.
- - it goes to sleep until the soonest job.
-*/
-package cron
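
For context on what this PR removes: callers of the package used the small API documented above. A sketch matching the exported functions in cron.go earlier in this diff:

    package main

    import (
        "fmt"
        "time"

        "gopkg.in/robfig/cron.v2"
    )

    func main() {
        c := cron.New()

        // AddFunc returns an EntryID that can later be passed to Remove.
        id, err := c.AddFunc("@every 1s", func() { fmt.Println("tick") })
        if err != nil {
            panic(err)
        }

        c.Start()
        time.Sleep(1500 * time.Millisecond)

        // Entry returns the zero Entry for unknown IDs; Valid reports that.
        fmt.Println("scheduled:", c.Entry(id).Valid())

        c.Remove(id)
        c.Stop() // stops scheduling; does not interrupt a job already running
    }
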
diff --git a/vendor/gopkg.in/robfig/cron.v2/parser.go b/vendor/gopkg.in/robfig/cron.v2/parser.go
deleted file mode 100644
index a9e6f947ac7d..000000000000
--- a/vendor/gopkg.in/robfig/cron.v2/parser.go
+++ /dev/null
@@ -1,246 +0,0 @@
-package cron
-
-import (
- "fmt"
- "log"
- "math"
- "strconv"
- "strings"
- "time"
-)
-
-// Parse returns a new crontab schedule representing the given spec.
-// It returns a descriptive error if the spec is not valid.
-//
-// It accepts
-// - Full crontab specs, e.g. "* * * * * ?"
-// - Descriptors, e.g. "@midnight", "@every 1h30m"
-func Parse(spec string) (_ Schedule, err error) {
- // Convert panics into errors
- defer func() {
- if recovered := recover(); recovered != nil {
- err = fmt.Errorf("%v", recovered)
- }
- }()
-
- // Extract timezone if present
- var loc = time.Local
- if strings.HasPrefix(spec, "TZ=") {
- i := strings.Index(spec, " ")
- if loc, err = time.LoadLocation(spec[3:i]); err != nil {
- log.Panicf("Provided bad location %s: %v", spec[3:i], err)
- }
- spec = strings.TrimSpace(spec[i:])
- }
-
- // Handle named schedules (descriptors)
- if strings.HasPrefix(spec, "@") {
- return parseDescriptor(spec, loc), nil
- }
-
- // Split on whitespace. We require 5 or 6 fields.
- // (second, optional) (minute) (hour) (day of month) (month) (day of week)
- fields := strings.Fields(spec)
- if len(fields) != 5 && len(fields) != 6 {
- log.Panicf("Expected 5 or 6 fields, found %d: %s", len(fields), spec)
- }
-
- // Add 0 for second field if necessary.
- if len(fields) == 5 {
- fields = append([]string{"0"}, fields...)
- }
-
- schedule := &SpecSchedule{
- Second: getField(fields[0], seconds),
- Minute: getField(fields[1], minutes),
- Hour: getField(fields[2], hours),
- Dom: getField(fields[3], dom),
- Month: getField(fields[4], months),
- Dow: getField(fields[5], dow),
- Location: loc,
- }
-
- return schedule, nil
-}
-
-// getField returns an Int with the bits set representing all of the times that
-// the field represents. A "field" is a comma-separated list of "ranges".
-func getField(field string, r bounds) uint64 {
- // list = range {"," range}
- var bits uint64
- ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' })
- for _, expr := range ranges {
- bits |= getRange(expr, r)
- }
- return bits
-}
-
-// getRange returns the bits indicated by the given expression:
-// number | number "-" number [ "/" number ]
-func getRange(expr string, r bounds) uint64 {
- var (
- start, end, step uint
- rangeAndStep = strings.Split(expr, "/")
- lowAndHigh = strings.Split(rangeAndStep[0], "-")
- singleDigit = len(lowAndHigh) == 1
- extraStar uint64
- )
- if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
- start = r.min
- end = r.max
- extraStar = starBit
- } else {
- start = parseIntOrName(lowAndHigh[0], r.names)
- switch len(lowAndHigh) {
- case 1:
- end = start
- case 2:
- end = parseIntOrName(lowAndHigh[1], r.names)
- default:
- log.Panicf("Too many hyphens: %s", expr)
- }
- }
-
- switch len(rangeAndStep) {
- case 1:
- step = 1
- case 2:
- step = mustParseInt(rangeAndStep[1])
-
- // Special handling: "N/step" means "N-max/step".
- if singleDigit {
- end = r.max
- }
- default:
- log.Panicf("Too many slashes: %s", expr)
- }
-
- if start < r.min {
- log.Panicf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
- }
- if end > r.max {
- log.Panicf("End of range (%d) above maximum (%d): %s", end, r.max, expr)
- }
- if start > end {
- log.Panicf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
- }
-
- return getBits(start, end, step) | extraStar
-}
-
-// parseIntOrName returns the (possibly-named) integer contained in expr.
-func parseIntOrName(expr string, names map[string]uint) uint {
- if names != nil {
- if namedInt, ok := names[strings.ToLower(expr)]; ok {
- return namedInt
- }
- }
- return mustParseInt(expr)
-}
-
-// mustParseInt parses the given expression as an int or panics.
-func mustParseInt(expr string) uint {
- num, err := strconv.Atoi(expr)
- if err != nil {
- log.Panicf("Failed to parse int from %s: %s", expr, err)
- }
- if num < 0 {
- log.Panicf("Negative number (%d) not allowed: %s", num, expr)
- }
-
- return uint(num)
-}
-
-// getBits sets all bits in the range [min, max], modulo the given step size.
-func getBits(min, max, step uint) uint64 {
- var bits uint64
-
- // If step is 1, use shifts.
- if step == 1 {
- return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
- }
-
- // Else, use a simple loop.
- for i := min; i <= max; i += step {
- bits |= 1 << i
- }
- return bits
-}
-
-// all returns all bits within the given bounds. (plus the star bit)
-func all(r bounds) uint64 {
- return getBits(r.min, r.max, 1) | starBit
-}
-
-// parseDescriptor returns a pre-defined schedule for the expression, or panics
-// if none matches.
-func parseDescriptor(spec string, loc *time.Location) Schedule {
- switch spec {
- case "@yearly", "@annually":
- return &SpecSchedule{
- Second: 1 << seconds.min,
- Minute: 1 << minutes.min,
- Hour: 1 << hours.min,
- Dom: 1 << dom.min,
- Month: 1 << months.min,
- Dow: all(dow),
- Location: loc,
- }
-
- case "@monthly":
- return &SpecSchedule{
- Second: 1 << seconds.min,
- Minute: 1 << minutes.min,
- Hour: 1 << hours.min,
- Dom: 1 << dom.min,
- Month: all(months),
- Dow: all(dow),
- Location: loc,
- }
-
- case "@weekly":
- return &SpecSchedule{
- Second: 1 << seconds.min,
- Minute: 1 << minutes.min,
- Hour: 1 << hours.min,
- Dom: all(dom),
- Month: all(months),
- Dow: 1 << dow.min,
- Location: loc,
- }
-
- case "@daily", "@midnight":
- return &SpecSchedule{
- Second: 1 << seconds.min,
- Minute: 1 << minutes.min,
- Hour: 1 << hours.min,
- Dom: all(dom),
- Month: all(months),
- Dow: all(dow),
- Location: loc,
- }
-
- case "@hourly":
- return &SpecSchedule{
- Second: 1 << seconds.min,
- Minute: 1 << minutes.min,
- Hour: all(hours),
- Dom: all(dom),
- Month: all(months),
- Dow: all(dow),
- Location: loc,
- }
- }
-
- const every = "@every "
- if strings.HasPrefix(spec, every) {
- duration, err := time.ParseDuration(spec[len(every):])
- if err != nil {
- log.Panicf("Failed to parse duration %s: %s", spec, err)
- }
- return Every(duration)
- }
-
- log.Panicf("Unrecognized descriptor: %s", spec)
- return nil
-}
diff --git a/vendor/gopkg.in/robfig/cron.v2/spec.go b/vendor/gopkg.in/robfig/cron.v2/spec.go
deleted file mode 100644
index 3dfd3e088a56..000000000000
--- a/vendor/gopkg.in/robfig/cron.v2/spec.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package cron
-
-import "time"
-
-// SpecSchedule specifies a duty cycle (to the second granularity), based on a
-// traditional crontab specification. It is computed initially and stored as bit sets.
-type SpecSchedule struct {
- Second, Minute, Hour, Dom, Month, Dow uint64
- Location *time.Location
-}
-
-// bounds provides a range of acceptable values (plus a map of name to value).
-type bounds struct {
- min, max uint
- names map[string]uint
-}
-
-// The bounds for each field.
-var (
- seconds = bounds{0, 59, nil}
- minutes = bounds{0, 59, nil}
- hours = bounds{0, 23, nil}
- dom = bounds{1, 31, nil}
- months = bounds{1, 12, map[string]uint{
- "jan": 1,
- "feb": 2,
- "mar": 3,
- "apr": 4,
- "may": 5,
- "jun": 6,
- "jul": 7,
- "aug": 8,
- "sep": 9,
- "oct": 10,
- "nov": 11,
- "dec": 12,
- }}
- dow = bounds{0, 6, map[string]uint{
- "sun": 0,
- "mon": 1,
- "tue": 2,
- "wed": 3,
- "thu": 4,
- "fri": 5,
- "sat": 6,
- }}
-)
-
-const (
- // Set the top bit if a star was included in the expression.
- starBit = 1 << 63
-)
-
-// Next returns the next time this schedule is activated, greater than the given
-// time. If no time can be found to satisfy the schedule, return the zero time.
-func (s *SpecSchedule) Next(t time.Time) time.Time {
- // General approach:
- // For Month, Day, Hour, Minute, Second:
- // Check if the time value matches. If yes, continue to the next field.
- // If the field doesn't match the schedule, then increment the field until it matches.
- // While incrementing the field, a wrap-around brings it back to the beginning
- // of the field list (since it is necessary to re-verify previous field
- // values)
-
- // Convert the given time into the schedule's timezone.
- // Save the original timezone so we can convert back after we find a time.
- origLocation := t.Location()
- t = t.In(s.Location)
-
- // Start at the earliest possible time (the upcoming second).
- t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
-
- // This flag indicates whether a field has been incremented.
- added := false
-
- // If no time is found within five years, return zero.
- yearLimit := t.Year() + 5
-
-WRAP:
- if t.Year() > yearLimit {
- return time.Time{}
- }
-
- // Find the first applicable month.
- // If it's this month, then do nothing.
-	for 1<<uint(t.Month())&s.Month == 0 {
-		// If we have to add a month, reset the other parts to 0.
-		if !added {
-			added = true
-			// Otherwise, set the date at the beginning (since the current time is irrelevant).
-			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, s.Location)
-		}
-		t = t.AddDate(0, 1, 0)
-
-		// Wrapped around.
-		if t.Month() == time.January {
-			goto WRAP
-		}
-	}
-
-	// Now get a day in that month.
-	for !dayMatches(s, t) {
-		if !added {
-			added = true
-			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, s.Location)
-		}
-		t = t.AddDate(0, 0, 1)
-
-		if t.Day() == 1 {
-			goto WRAP
-		}
-	}
-
-	for 1<<uint(t.Hour())&s.Hour == 0 {
-		if !added {
-			added = true
-			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, s.Location)
-		}
-		t = t.Add(1 * time.Hour)
-
-		if t.Hour() == 0 {
-			goto WRAP
-		}
-	}
-
-	for 1<<uint(t.Minute())&s.Minute == 0 {
-		if !added {
-			added = true
-			t = t.Truncate(time.Minute)
-		}
-		t = t.Add(1 * time.Minute)
-
-		if t.Minute() == 0 {
-			goto WRAP
-		}
-	}
-
-	for 1<<uint(t.Second())&s.Second == 0 {
-		if !added {
-			added = true
-			t = t.Truncate(time.Second)
-		}
-		t = t.Add(1 * time.Second)
-
-		if t.Second() == 0 {
-			goto WRAP
-		}
-	}
-
-	return t.In(origLocation)
-}
-
-// dayMatches returns true if the schedule's day-of-week and day-of-month
-// restrictions are satisfied by the given time.
-func dayMatches(s *SpecSchedule, t time.Time) bool {
-	var (
-		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
-		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
- )
-
- if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
- return domMatch && dowMatch
- }
- return domMatch || dowMatch
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 5914e1a5824d..edb097dc3ff4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -352,6 +352,8 @@ github.com/coreos/flannel/subnet/kube
github.com/coreos/go-iptables/iptables
# github.com/coreos/go-oidc v2.1.0+incompatible
github.com/coreos/go-oidc
+# github.com/coreos/go-semver v0.3.0
+github.com/coreos/go-semver/semver
# github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f => github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
github.com/coreos/go-systemd/activation
github.com/coreos/go-systemd/daemon
@@ -418,6 +420,8 @@ github.com/docker/go-units
# github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96
github.com/docker/spdystream
github.com/docker/spdystream/spdy
+# github.com/dustin/go-humanize v1.0.0
+github.com/dustin/go-humanize
# github.com/emicklei/go-restful v2.9.5+incompatible
github.com/emicklei/go-restful
github.com/emicklei/go-restful/log
@@ -429,8 +433,6 @@ github.com/evanphx/json-patch
github.com/exponent-io/jsonpath
# github.com/fatih/camelcase v1.0.0
github.com/fatih/camelcase
-# github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4
-github.com/flosch/pongo2
# github.com/fsnotify/fsnotify v1.4.7
github.com/fsnotify/fsnotify
# github.com/ghodss/yaml v1.0.0
@@ -481,6 +483,9 @@ github.com/golang/mock/gomock
github.com/golang/protobuf/jsonpb
github.com/golang/protobuf/proto
github.com/golang/protobuf/protoc-gen-go/descriptor
+github.com/golang/protobuf/protoc-gen-go/generator
+github.com/golang/protobuf/protoc-gen-go/generator/internal/remap
+github.com/golang/protobuf/protoc-gen-go/plugin
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
@@ -581,8 +586,14 @@ github.com/gorilla/websocket
# github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7
github.com/gregjones/httpcache
github.com/gregjones/httpcache/diskcache
+# github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
+github.com/grpc-ecosystem/go-grpc-middleware
# github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
github.com/grpc-ecosystem/go-grpc-prometheus
+# github.com/grpc-ecosystem/grpc-gateway v1.9.5
+github.com/grpc-ecosystem/grpc-gateway/internal
+github.com/grpc-ecosystem/grpc-gateway/runtime
+github.com/grpc-ecosystem/grpc-gateway/utilities
# github.com/hashicorp/golang-lru v0.5.3
github.com/hashicorp/golang-lru
github.com/hashicorp/golang-lru/simplelru
@@ -613,14 +624,6 @@ github.com/lib/pq/scram
github.com/liggitt/tabwriter
# github.com/lithammer/dedent v1.1.0
github.com/lithammer/dedent
-# github.com/lxc/lxd v0.0.0-20191108214106-60ea15630455
-github.com/lxc/lxd/shared
-github.com/lxc/lxd/shared/api
-github.com/lxc/lxd/shared/cancel
-github.com/lxc/lxd/shared/eagain
-github.com/lxc/lxd/shared/ioprogress
-github.com/lxc/lxd/shared/logger
-github.com/lxc/lxd/shared/units
# github.com/mailru/easyjson v0.7.0
github.com/mailru/easyjson/buffer
github.com/mailru/easyjson/jlexer
@@ -830,6 +833,8 @@ github.com/seccomp/libseccomp-golang
github.com/shurcooL/sanitized_anchor_name
# github.com/sirupsen/logrus v1.4.2
github.com/sirupsen/logrus
+# github.com/soheilhy/cmux v0.1.4
+github.com/soheilhy/cmux
# github.com/spf13/afero v1.2.2
github.com/spf13/afero
github.com/spf13/afero/mem
@@ -843,6 +848,8 @@ github.com/syndtr/gocapability/capability
github.com/tchap/go-patricia/patricia
# github.com/theckman/go-flock v0.7.1
github.com/theckman/go-flock
+# github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8
+github.com/tmc/grpc-websocket-proxy/wsproxy
# github.com/urfave/cli v1.22.2
github.com/urfave/cli
# github.com/urfave/cli/v2 v2.0.0
@@ -879,29 +886,88 @@ github.com/vmware/govmomi/vim25/progress
github.com/vmware/govmomi/vim25/soap
github.com/vmware/govmomi/vim25/types
github.com/vmware/govmomi/vim25/xml
+# github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2
+github.com/xiang90/probing
# go.etcd.io/bbolt v1.3.3
go.etcd.io/bbolt
-# go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738
+# go.etcd.io/etcd v0.5.0-alpha.5.0.20200520232829-54ba9589114f
+go.etcd.io/etcd/auth
go.etcd.io/etcd/auth/authpb
+go.etcd.io/etcd/client
go.etcd.io/etcd/clientv3
go.etcd.io/etcd/clientv3/balancer
go.etcd.io/etcd/clientv3/balancer/connectivity
go.etcd.io/etcd/clientv3/balancer/picker
go.etcd.io/etcd/clientv3/balancer/resolver/endpoint
+go.etcd.io/etcd/clientv3/concurrency
go.etcd.io/etcd/clientv3/credentials
+go.etcd.io/etcd/embed
+go.etcd.io/etcd/etcdserver
+go.etcd.io/etcd/etcdserver/api
+go.etcd.io/etcd/etcdserver/api/etcdhttp
+go.etcd.io/etcd/etcdserver/api/membership
+go.etcd.io/etcd/etcdserver/api/rafthttp
+go.etcd.io/etcd/etcdserver/api/snap
+go.etcd.io/etcd/etcdserver/api/snap/snappb
+go.etcd.io/etcd/etcdserver/api/v2auth
+go.etcd.io/etcd/etcdserver/api/v2discovery
+go.etcd.io/etcd/etcdserver/api/v2error
+go.etcd.io/etcd/etcdserver/api/v2http
+go.etcd.io/etcd/etcdserver/api/v2http/httptypes
+go.etcd.io/etcd/etcdserver/api/v2stats
+go.etcd.io/etcd/etcdserver/api/v2store
+go.etcd.io/etcd/etcdserver/api/v2v3
+go.etcd.io/etcd/etcdserver/api/v3alarm
+go.etcd.io/etcd/etcdserver/api/v3client
+go.etcd.io/etcd/etcdserver/api/v3compactor
+go.etcd.io/etcd/etcdserver/api/v3election
+go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb
+go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/gw
+go.etcd.io/etcd/etcdserver/api/v3lock
+go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb
+go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/gw
+go.etcd.io/etcd/etcdserver/api/v3rpc
go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes
go.etcd.io/etcd/etcdserver/etcdserverpb
+go.etcd.io/etcd/etcdserver/etcdserverpb/gw
+go.etcd.io/etcd/lease
+go.etcd.io/etcd/lease/leasehttp
+go.etcd.io/etcd/lease/leasepb
+go.etcd.io/etcd/mvcc
+go.etcd.io/etcd/mvcc/backend
go.etcd.io/etcd/mvcc/mvccpb
+go.etcd.io/etcd/pkg/adt
+go.etcd.io/etcd/pkg/contention
+go.etcd.io/etcd/pkg/cpuutil
+go.etcd.io/etcd/pkg/crc
+go.etcd.io/etcd/pkg/debugutil
+go.etcd.io/etcd/pkg/fileutil
+go.etcd.io/etcd/pkg/flags
+go.etcd.io/etcd/pkg/httputil
+go.etcd.io/etcd/pkg/idutil
+go.etcd.io/etcd/pkg/ioutil
go.etcd.io/etcd/pkg/logutil
+go.etcd.io/etcd/pkg/netutil
+go.etcd.io/etcd/pkg/pathutil
+go.etcd.io/etcd/pkg/pbutil
+go.etcd.io/etcd/pkg/runtime
+go.etcd.io/etcd/pkg/schedule
+go.etcd.io/etcd/pkg/srv
go.etcd.io/etcd/pkg/systemd
go.etcd.io/etcd/pkg/tlsutil
+go.etcd.io/etcd/pkg/traceutil
go.etcd.io/etcd/pkg/transport
go.etcd.io/etcd/pkg/types
+go.etcd.io/etcd/pkg/wait
+go.etcd.io/etcd/proxy/grpcproxy/adapter
go.etcd.io/etcd/raft
go.etcd.io/etcd/raft/confchange
go.etcd.io/etcd/raft/quorum
go.etcd.io/etcd/raft/raftpb
go.etcd.io/etcd/raft/tracker
+go.etcd.io/etcd/version
+go.etcd.io/etcd/wal
+go.etcd.io/etcd/wal/walpb
# go.mongodb.org/mongo-driver v1.1.2
go.mongodb.org/mongo-driver/bson
go.mongodb.org/mongo-driver/bson/bsoncodec
@@ -938,6 +1004,7 @@ go.uber.org/zap/internal/color
go.uber.org/zap/internal/exit
go.uber.org/zap/zapcore
# golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975
+golang.org/x/crypto/bcrypt
golang.org/x/crypto/blowfish
golang.org/x/crypto/chacha20
golang.org/x/crypto/cryptobyte
@@ -1078,7 +1145,9 @@ google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55
+google.golang.org/genproto/googleapis/api/httpbody
google.golang.org/genproto/googleapis/rpc/status
+google.golang.org/genproto/protobuf/field_mask
# google.golang.org/grpc v1.26.0
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -1129,8 +1198,6 @@ gopkg.in/gcfg.v1/types
gopkg.in/inf.v0
# gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/natefinch/lumberjack.v2
-# gopkg.in/robfig/cron.v2 v2.0.0-20150107220207-be2e0b0deed5
-gopkg.in/robfig/cron.v2
# gopkg.in/square/go-jose.v2 v2.2.2
gopkg.in/square/go-jose.v2
gopkg.in/square/go-jose.v2/cipher