diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 68d5fe9a5de6..18a28b1aece5 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -13,6 +13,21 @@ "Comment": "null-5", "Rev": "75cd24fc2f2c2a2088577d12123ddee5f54e0675" }, + { + "ImportPath": "cloud.google.com/go/compute/metadata", + "Comment": "v0.1.0-115-g3b1ae45", + "Rev": "3b1ae45394a234c385be014e9a488f2bb6eef821" + }, + { + "ImportPath": "cloud.google.com/go/internal", + "Comment": "v0.1.0-115-g3b1ae45", + "Rev": "3b1ae45394a234c385be014e9a488f2bb6eef821" + }, + { + "ImportPath": "cloud.google.com/go/storage", + "Comment": "v0.1.0-115-g3b1ae45", + "Rev": "3b1ae45394a234c385be014e9a488f2bb6eef821" + }, { "ImportPath": "github.com/AaronO/go-git-http", "Rev": "34209cf6cd947cfa52063bcb0f6d43cfa50c5566" }, @@ -1601,198 +1616,198 @@ }, { "ImportPath": "github.com/google/cadvisor/api", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/cache/memory", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/collector", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", - "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/container", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/container/common", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/container/docker", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/container/libcontainer", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/container/raw", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/container/rkt", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/container/systemd", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/devicemapper", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/events", - 
"Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/fs", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/healthz", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/http", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/http/mux", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/info/v2", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/machine", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/manager", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/raw", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/rkt", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/metrics", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/pages", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/pages/static", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { 
"ImportPath": "github.com/google/cadvisor/storage", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/summary", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/utils", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/utils/cloudinfo", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/utils/docker", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/utils/oomparser", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/utils/sysfs", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/utils/sysinfo", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/utils/tail", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/validate", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/cadvisor/version", - "Comment": "v0.24.0-alpha1-1-gd84e075", - "Rev": "d84e0758ab16ee68598702793119c9a7370c1522" + "Comment": "v0.24.0-5-gef63d70", + "Rev": "ef63d70156d509efbbacfc3e86ed120228fab914" }, { "ImportPath": "github.com/google/certificate-transparency/go", @@ -2240,91 +2255,91 @@ }, { "ImportPath": "github.com/openshift/source-to-image/pkg/api", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/api/describe", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": 
"5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/api/validation", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/build", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/build/strategies", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/build/strategies/layered", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/build/strategies/onbuild", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/build/strategies/sti", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/docker", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/errors", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/ignore", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/scm", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/scm/empty", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/scm/file", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/scm/git", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/scripts", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/tar", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/test", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/util", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/util/glog", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/util/interrupt", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + "Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/openshift/source-to-image/pkg/util/user", - "Rev": "2dffea37104471547b865307415876cba2bdf1fc" + 
"Rev": "5009651d01b7f96b5373979317f4d4f32415f32b" }, { "ImportPath": "github.com/pborman/uuid", @@ -2811,23 +2826,23 @@ }, { "ImportPath": "golang.org/x/oauth2", - "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" + "Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01" }, { "ImportPath": "golang.org/x/oauth2/google", - "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" + "Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01" }, { "ImportPath": "golang.org/x/oauth2/internal", - "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" + "Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01" }, { "ImportPath": "golang.org/x/oauth2/jws", - "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" + "Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01" }, { "ImportPath": "golang.org/x/oauth2/jwt", - "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" + "Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01" }, { "ImportPath": "golang.org/x/sys/unix", @@ -2879,51 +2894,31 @@ }, { "ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2", - "Rev": "4300f6b0c8a7f09e521dd0af2cee27e28846e037" + "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" }, { "ImportPath": "google.golang.org/api/compute/v1", - "Rev": "4300f6b0c8a7f09e521dd0af2cee27e28846e037" + "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" }, { "ImportPath": "google.golang.org/api/container/v1", - "Rev": "4300f6b0c8a7f09e521dd0af2cee27e28846e037" + "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" }, { "ImportPath": "google.golang.org/api/gensupport", - "Rev": "4300f6b0c8a7f09e521dd0af2cee27e28846e037" + "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" }, { "ImportPath": "google.golang.org/api/googleapi", - "Rev": "4300f6b0c8a7f09e521dd0af2cee27e28846e037" + "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" }, { "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", - "Rev": "4300f6b0c8a7f09e521dd0af2cee27e28846e037" + "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" }, { "ImportPath": "google.golang.org/api/storage/v1", - "Rev": "4300f6b0c8a7f09e521dd0af2cee27e28846e037" - }, - { - "ImportPath": "google.golang.org/cloud", - "Rev": "eb47ba841d53d93506cfbfbc03927daf9cc48f88" - }, - { - "ImportPath": "google.golang.org/cloud/compute/metadata", - "Rev": "eb47ba841d53d93506cfbfbc03927daf9cc48f88" - }, - { - "ImportPath": "google.golang.org/cloud/internal", - "Rev": "eb47ba841d53d93506cfbfbc03927daf9cc48f88" - }, - { - "ImportPath": "google.golang.org/cloud/internal/opts", - "Rev": "eb47ba841d53d93506cfbfbc03927daf9cc48f88" - }, - { - "ImportPath": "google.golang.org/cloud/storage", - "Rev": "eb47ba841d53d93506cfbfbc03927daf9cc48f88" + "Rev": "a69f0f19d246419bb931b0ac8f4f8d3f3e6d4feb" }, { "ImportPath": "google.golang.org/grpc", @@ -3016,351 +3011,507 @@ }, { "ImportPath": "k8s.io/client-go/1.4/discovery", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/dynamic", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/kubernetes", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/kubernetes/typed/authorization/v1beta1", - "Rev": 
"82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/kubernetes/typed/autoscaling/v1", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/kubernetes/typed/batch/v1", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/kubernetes/typed/core/v1", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/kubernetes/typed/policy/v1alpha1", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/api", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/api/endpoints", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/api/errors", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/api/install", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/api/meta", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/api/meta/metatypes", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/api/pod", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/api/resource", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/api/service", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/api/unversioned", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/api/unversioned/validation", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/api/util", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/api/v1", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": 
"k8s.io/client-go/1.4/pkg/api/validation", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apimachinery", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apimachinery/registered", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/authorization", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/authorization/install", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/authorization/v1beta1", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/apps", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/apps/install", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/authentication", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/authentication/install", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/authentication/v1beta1", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/authorization", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/authorization/install", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/authorization/v1beta1", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/autoscaling", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/autoscaling/install", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/autoscaling/v1", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/batch", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/batch/install", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/batch/v1", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/batch/v2alpha1", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/certificates", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/certificates/install", + "Rev": 
"d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/certificates/v1alpha1", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/componentconfig", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/componentconfig/install", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/extensions", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/extensions/install", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/imagepolicy", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/imagepolicy/install", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/imagepolicy/v1alpha1", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/policy", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/policy/install", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/apis/policy/v1alpha1", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/rbac", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/rbac/install", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/rbac/v1alpha1", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/storage", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/storage/install", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/apis/storage/v1beta1", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/auth/user", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/capabilities", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/conversion", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/conversion/queryparams", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/fields", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": 
"k8s.io/client-go/1.4/pkg/kubelet/qos", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/kubelet/server/portforward", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/kubelet/types", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/labels", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/master/ports", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/runtime", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/runtime/serializer", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/runtime/serializer/json", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/runtime/serializer/protobuf", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/runtime/serializer/recognizer", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/runtime/serializer/streaming", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/runtime/serializer/versioning", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/security/apparmor", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/selection", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/third_party/forked/golang/json", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/third_party/forked/golang/reflect", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/types", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/clock", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/config", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/crypto", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/util/diff", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/errors", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": 
"d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/flowcontrol", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/framer", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/hash", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/util/homedir", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/util/httpstream", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/integer", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/intstr", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/json", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/labels", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/net", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/net/sets", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/parsers", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/rand", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/runtime", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/sets", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" + }, + { + "ImportPath": "k8s.io/client-go/1.4/pkg/util/strategicpatch", + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/uuid", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/validation", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/validation/field", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/wait", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/util/yaml", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/version", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": 
"k8s.io/client-go/1.4/pkg/watch", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/pkg/watch/versioned", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/rest", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/tools/clientcmd/api", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/tools/metrics", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/client-go/1.4/transport", - "Rev": "82291698d0e5b9f3bfea77ae707aa4e0007d6e76" + "Rev": "d72c0e162789e1bbb33c33cfa26858a1375efe01" }, { "ImportPath": "k8s.io/heapster/metrics/api/v1/types", diff --git a/pkg/authorization/client/clientset_generated/release_v1_4/clientset.go b/pkg/authorization/client/clientset_generated/release_v1_4/clientset.go new file mode 100644 index 000000000000..dc804b945f9a --- /dev/null +++ b/pkg/authorization/client/clientset_generated/release_v1_4/clientset.go @@ -0,0 +1,74 @@ +package release_v1_4 + +import ( + "github.com/golang/glog" + v1core "github.com/openshift/origin/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Core() v1core.CoreInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *v1core.CoreClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.CoreClient, err = v1core.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &clientset, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. 
+func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/pkg/authorization/client/clientset_generated/release_v1_4/doc.go b/pkg/authorization/client/clientset_generated/release_v1_4/doc.go new file mode 100644 index 000000000000..f392a69f3a77 --- /dev/null +++ b/pkg/authorization/client/clientset_generated/release_v1_4/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/authorization/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/authorization/api --output-base=../../.. + +// This package has the automatically generated clientset. +package release_v1_4 diff --git a/pkg/authorization/client/clientset_generated/release_v1_4/fake/clientset_generated.go b/pkg/authorization/client/clientset_generated/release_v1_4/fake/clientset_generated.go new file mode 100644 index 000000000000..58ee3e99c1e4 --- /dev/null +++ b/pkg/authorization/client/clientset_generated/release_v1_4/fake/clientset_generated.go @@ -0,0 +1,52 @@ +package fake + +import ( + clientset "github.com/openshift/origin/pkg/authorization/client/clientset_generated/release_v1_4" + v1core "github.com/openshift/origin/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1" + fakev1core "github.com/openshift/origin/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/fake" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/discovery" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := core.NewObjectTracker(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := core.Fake{} + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) + + fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + core.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ clientset.Interface = &Clientset{} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + return &fakev1core.FakeCore{Fake: &c.Fake} +} diff --git a/pkg/authorization/client/clientset_generated/release_v1_4/fake/doc.go b/pkg/authorization/client/clientset_generated/release_v1_4/fake/doc.go new file mode 100644 index 000000000000..ed777732f7ee --- /dev/null +++ b/pkg/authorization/client/clientset_generated/release_v1_4/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/authorization/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/authorization/api --output-base=../../.. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go new file mode 100644 index 000000000000..093d3e4cb590 --- /dev/null +++ b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go @@ -0,0 +1,80 @@ +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" + serializer "k8s.io/kubernetes/pkg/runtime/serializer" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + PoliciesGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) Policies(namespace string) PolicyInterface { + return newPolicies(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. +func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/oapi" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/doc.go b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/doc.go new file mode 100644 index 000000000000..daaac2f72daa --- /dev/null +++ b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/authorization/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/authorization/api --output-base=../../.. + +// This package has the automatically generated typed clients. +package v1 diff --git a/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go new file mode 100644 index 000000000000..6e4576036024 --- /dev/null +++ b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/authorization/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/authorization/api --output-base=../../.. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 000000000000..972b760548ad --- /dev/null +++ b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,21 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) Policies(namespace string) v1.PolicyInterface { + return &FakePolicies{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_policy.go b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_policy.go new file mode 100644 index 000000000000..0affe6cb2122 --- /dev/null +++ b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_policy.go @@ -0,0 +1,101 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/authorization/api/v1" + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakePolicies implements PolicyInterface +type FakePolicies struct { + Fake *FakeCore + ns string +} + +var policiesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "policies"} + +func (c *FakePolicies) Create(policy *v1.Policy) (result *v1.Policy, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(policiesResource, c.ns, policy), &v1.Policy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Policy), err +} + +func (c *FakePolicies) Update(policy *v1.Policy) (result *v1.Policy, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(policiesResource, c.ns, policy), &v1.Policy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Policy), err +} + +func (c *FakePolicies) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(policiesResource, c.ns, name), &v1.Policy{}) + + return err +} + +func (c *FakePolicies) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(policiesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.PolicyList{}) + return err +} + +func (c *FakePolicies) Get(name string) (result *v1.Policy, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(policiesResource, c.ns, name), &v1.Policy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Policy), err +} + +func (c *FakePolicies) List(opts api.ListOptions) (result *v1.PolicyList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(policiesResource, c.ns, opts), &v1.PolicyList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.PolicyList{} + for _, item := range obj.(*v1.PolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested policies. +func (c *FakePolicies) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(policiesResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched policy. +func (c *FakePolicies) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Policy, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewPatchSubresourceAction(policiesResource, c.ns, name, data, subresources...), &v1.Policy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Policy), err +} diff --git a/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000000..07761d78ea47 --- /dev/null +++ b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go @@ -0,0 +1,3 @@ +package v1 + +type PolicyExpansion interface{} diff --git a/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/policy.go b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/policy.go new file mode 100644 index 000000000000..4a6f85edbc0f --- /dev/null +++ b/pkg/authorization/client/clientset_generated/release_v1_4/typed/core/v1/policy.go @@ -0,0 +1,135 @@ +package v1 + +import ( + v1 "github.com/openshift/origin/pkg/authorization/api/v1" + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// PoliciesGetter has a method to return a PolicyInterface. +// A group's client should implement this interface. +type PoliciesGetter interface { + Policies(namespace string) PolicyInterface +} + +// PolicyInterface has methods to work with Policy resources. +type PolicyInterface interface { + Create(*v1.Policy) (*v1.Policy, error) + Update(*v1.Policy) (*v1.Policy, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Policy, error) + List(opts api.ListOptions) (*v1.PolicyList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Policy, err error) + PolicyExpansion +} + +// policies implements PolicyInterface +type policies struct { + client *CoreClient + ns string +} + +// newPolicies returns a Policies +func newPolicies(c *CoreClient, namespace string) *policies { + return &policies{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a policy and creates it. Returns the server's representation of the policy, and an error, if there is any. +func (c *policies) Create(policy *v1.Policy) (result *v1.Policy, err error) { + result = &v1.Policy{} + err = c.client.Post(). + Namespace(c.ns). + Resource("policies"). + Body(policy). + Do(). + Into(result) + return +} + +// Update takes the representation of a policy and updates it. Returns the server's representation of the policy, and an error, if there is any. +func (c *policies) Update(policy *v1.Policy) (result *v1.Policy, err error) { + result = &v1.Policy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("policies"). + Name(policy.Name). + Body(policy). + Do(). + Into(result) + return +} + +// Delete takes name of the policy and deletes it. Returns an error if one occurs. +func (c *policies) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("policies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *policies) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("policies"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Get takes name of the policy, and returns the corresponding policy object, and an error if there is any. +func (c *policies) Get(name string) (result *v1.Policy, err error) { + result = &v1.Policy{} + err = c.client.Get(). + Namespace(c.ns). + Resource("policies"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Policies that match those selectors. +func (c *policies) List(opts api.ListOptions) (result *v1.PolicyList, err error) { + result = &v1.PolicyList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("policies"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested policies. +func (c *policies) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("policies"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched policy. +func (c *policies) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Policy, err error) { + result = &v1.Policy{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("policies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/build/client/clientset_generated/release_v1_4/clientset.go b/pkg/build/client/clientset_generated/release_v1_4/clientset.go new file mode 100644 index 000000000000..00c7f7a679b2 --- /dev/null +++ b/pkg/build/client/clientset_generated/release_v1_4/clientset.go @@ -0,0 +1,74 @@ +package release_v1_4 + +import ( + "github.com/golang/glog" + v1core "github.com/openshift/origin/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Core() v1core.CoreInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *v1core.CoreClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.CoreClient, err = v1core.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &clientset, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. +func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/pkg/build/client/clientset_generated/release_v1_4/doc.go b/pkg/build/client/clientset_generated/release_v1_4/doc.go new file mode 100644 index 000000000000..8f9554344317 --- /dev/null +++ b/pkg/build/client/clientset_generated/release_v1_4/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/build/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/build/api --output-base=../../.. + +// This package has the automatically generated clientset. +package release_v1_4 diff --git a/pkg/build/client/clientset_generated/release_v1_4/fake/clientset_generated.go b/pkg/build/client/clientset_generated/release_v1_4/fake/clientset_generated.go new file mode 100644 index 000000000000..cae6a8b81343 --- /dev/null +++ b/pkg/build/client/clientset_generated/release_v1_4/fake/clientset_generated.go @@ -0,0 +1,52 @@ +package fake + +import ( + clientset "github.com/openshift/origin/pkg/build/client/clientset_generated/release_v1_4" + v1core "github.com/openshift/origin/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1" + fakev1core "github.com/openshift/origin/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/fake" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/discovery" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := core.NewObjectTracker(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := core.Fake{} + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) + + fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + core.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ clientset.Interface = &Clientset{} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + return &fakev1core.FakeCore{Fake: &c.Fake} +} diff --git a/pkg/build/client/clientset_generated/release_v1_4/fake/doc.go b/pkg/build/client/clientset_generated/release_v1_4/fake/doc.go new file mode 100644 index 000000000000..616e1175f79e --- /dev/null +++ b/pkg/build/client/clientset_generated/release_v1_4/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/build/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/build/api --output-base=../../.. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/build.go b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/build.go new file mode 100644 index 000000000000..27474cd24014 --- /dev/null +++ b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/build.go @@ -0,0 +1,149 @@ +package v1 + +import ( + v1 "github.com/openshift/origin/pkg/build/api/v1" + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// BuildsGetter has a method to return a BuildInterface. +// A group's client should implement this interface. +type BuildsGetter interface { + Builds(namespace string) BuildInterface +} + +// BuildInterface has methods to work with Build resources. +type BuildInterface interface { + Create(*v1.Build) (*v1.Build, error) + Update(*v1.Build) (*v1.Build, error) + UpdateStatus(*v1.Build) (*v1.Build, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Build, error) + List(opts api.ListOptions) (*v1.BuildList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Build, err error) + BuildExpansion +} + +// builds implements BuildInterface +type builds struct { + client *CoreClient + ns string +} + +// newBuilds returns a Builds +func newBuilds(c *CoreClient, namespace string) *builds { + return &builds{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a build and creates it. Returns the server's representation of the build, and an error, if there is any. +func (c *builds) Create(build *v1.Build) (result *v1.Build, err error) { + result = &v1.Build{} + err = c.client.Post(). + Namespace(c.ns). + Resource("builds"). + Body(build). + Do(). + Into(result) + return +} + +// Update takes the representation of a build and updates it. Returns the server's representation of the build, and an error, if there is any. +func (c *builds) Update(build *v1.Build) (result *v1.Build, err error) { + result = &v1.Build{} + err = c.client.Put(). + Namespace(c.ns). + Resource("builds"). + Name(build.Name). + Body(build). + Do(). + Into(result) + return +} + +func (c *builds) UpdateStatus(build *v1.Build) (result *v1.Build, err error) { + result = &v1.Build{} + err = c.client.Put(). + Namespace(c.ns). + Resource("builds"). + Name(build.Name). + SubResource("status"). + Body(build). 
+ Do(). + Into(result) + return +} + +// Delete takes name of the build and deletes it. Returns an error if one occurs. +func (c *builds) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("builds"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *builds) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("builds"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the build, and returns the corresponding build object, and an error if there is any. +func (c *builds) Get(name string) (result *v1.Build, err error) { + result = &v1.Build{} + err = c.client.Get(). + Namespace(c.ns). + Resource("builds"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Builds that match those selectors. +func (c *builds) List(opts api.ListOptions) (result *v1.BuildList, err error) { + result = &v1.BuildList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("builds"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested builds. +func (c *builds) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("builds"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched build. +func (c *builds) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Build, err error) { + result = &v1.Build{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("builds"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go new file mode 100644 index 000000000000..adface7a928a --- /dev/null +++ b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go @@ -0,0 +1,80 @@ +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" + serializer "k8s.io/kubernetes/pkg/runtime/serializer" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + BuildsGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) Builds(namespace string) BuildInterface { + return newBuilds(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. 
+func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/oapi" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/doc.go b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/doc.go new file mode 100644 index 000000000000..e8fea5d565d7 --- /dev/null +++ b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/build/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/build/api --output-base=../../.. + +// This package has the automatically generated typed clients. +package v1 diff --git a/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go new file mode 100644 index 000000000000..f8feae622106 --- /dev/null +++ b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/build/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/build/api --output-base=../../.. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_build.go b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_build.go new file mode 100644 index 000000000000..8b7767d008ed --- /dev/null +++ b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_build.go @@ -0,0 +1,111 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/build/api/v1" + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeBuilds implements BuildInterface +type FakeBuilds struct { + Fake *FakeCore + ns string +} + +var buildsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "builds"} + +func (c *FakeBuilds) Create(build *v1.Build) (result *v1.Build, err error) { + obj, err := c.Fake.
+ Invokes(core.NewCreateAction(buildsResource, c.ns, build), &v1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Build), err +} + +func (c *FakeBuilds) Update(build *v1.Build) (result *v1.Build, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(buildsResource, c.ns, build), &v1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Build), err +} + +func (c *FakeBuilds) UpdateStatus(build *v1.Build) (*v1.Build, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(buildsResource, "status", c.ns, build), &v1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Build), err +} + +func (c *FakeBuilds) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(buildsResource, c.ns, name), &v1.Build{}) + + return err +} + +func (c *FakeBuilds) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(buildsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.BuildList{}) + return err +} + +func (c *FakeBuilds) Get(name string) (result *v1.Build, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(buildsResource, c.ns, name), &v1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Build), err +} + +func (c *FakeBuilds) List(opts api.ListOptions) (result *v1.BuildList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(buildsResource, c.ns, opts), &v1.BuildList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.BuildList{} + for _, item := range obj.(*v1.BuildList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested builds. +func (c *FakeBuilds) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(buildsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched build. +func (c *FakeBuilds) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Build, err error) { + obj, err := c.Fake. + Invokes(core.NewPatchSubresourceAction(buildsResource, c.ns, name, data, subresources...), &v1.Build{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Build), err +} diff --git a/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 000000000000..528b6348b707 --- /dev/null +++ b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,21 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) Builds(namespace string) v1.BuildInterface { + return &FakeBuilds{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
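An illustrative aside (not generated code from this change): the fake build clientset above is intended to back unit tests. The sketch below uses placeholder package, test, namespace and build names, and assumes the Origin build API types are registered with api.Scheme and the REST mapper (as they are in Origin's own binaries and tests), which the fake's ObjectReaction requires.

package fake_test // hypothetical test package

import (
	"testing"

	buildv1 "github.com/openshift/origin/pkg/build/api/v1"
	"github.com/openshift/origin/pkg/build/client/clientset_generated/release_v1_4/fake"
)

func TestFakeBuildClient(t *testing.T) {
	// Start with an empty object tracker; no validation or defaulting is applied.
	clientset := fake.NewSimpleClientset()

	build := &buildv1.Build{}
	build.Name = "my-build" // ObjectMeta is embedded, so Name is promoted

	// The fake satisfies the same generated BuildInterface as the real client,
	// so code under test calls Core().Builds(...) exactly as it would in production.
	if _, err := clientset.Core().Builds("demo").Create(build); err != nil {
		t.Fatalf("create: %v", err)
	}
	got, err := clientset.Core().Builds("demo").Get("my-build")
	if err != nil {
		t.Fatalf("get: %v", err)
	}
	if got.Name != "my-build" {
		t.Fatalf("got build %q, want %q", got.Name, "my-build")
	}
}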
+func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000000..9a2082a1cbb7 --- /dev/null +++ b/pkg/build/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go @@ -0,0 +1,3 @@ +package v1 + +type BuildExpansion interface{} diff --git a/pkg/deploy/client/clientset_generated/release_v1_4/clientset.go b/pkg/deploy/client/clientset_generated/release_v1_4/clientset.go new file mode 100644 index 000000000000..b87753c0a735 --- /dev/null +++ b/pkg/deploy/client/clientset_generated/release_v1_4/clientset.go @@ -0,0 +1,74 @@ +package release_v1_4 + +import ( + "github.com/golang/glog" + v1core "github.com/openshift/origin/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Core() v1core.CoreInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *v1core.CoreClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.CoreClient, err = v1core.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &clientset, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. 
+func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/pkg/deploy/client/clientset_generated/release_v1_4/doc.go b/pkg/deploy/client/clientset_generated/release_v1_4/doc.go new file mode 100644 index 000000000000..2ffa4bbcdfba --- /dev/null +++ b/pkg/deploy/client/clientset_generated/release_v1_4/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/deploy/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/deploy/api --output-base=../../.. + +// This package has the automatically generated clientset. +package release_v1_4 diff --git a/pkg/deploy/client/clientset_generated/release_v1_4/fake/clientset_generated.go b/pkg/deploy/client/clientset_generated/release_v1_4/fake/clientset_generated.go new file mode 100644 index 000000000000..4558d8f57197 --- /dev/null +++ b/pkg/deploy/client/clientset_generated/release_v1_4/fake/clientset_generated.go @@ -0,0 +1,52 @@ +package fake + +import ( + clientset "github.com/openshift/origin/pkg/deploy/client/clientset_generated/release_v1_4" + v1core "github.com/openshift/origin/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1" + fakev1core "github.com/openshift/origin/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/fake" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/discovery" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := core.NewObjectTracker(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := core.Fake{} + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) + + fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + core.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ clientset.Interface = &Clientset{} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + return &fakev1core.FakeCore{Fake: &c.Fake} +} diff --git a/pkg/deploy/client/clientset_generated/release_v1_4/fake/doc.go b/pkg/deploy/client/clientset_generated/release_v1_4/fake/doc.go new file mode 100644 index 000000000000..1abb68c21de2 --- /dev/null +++ b/pkg/deploy/client/clientset_generated/release_v1_4/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/deploy/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/deploy/api --output-base=../../.. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go new file mode 100644 index 000000000000..b20c32372226 --- /dev/null +++ b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go @@ -0,0 +1,80 @@ +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" + serializer "k8s.io/kubernetes/pkg/runtime/serializer" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + DeploymentConfigsGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) DeploymentConfigs(namespace string) DeploymentConfigInterface { + return newDeploymentConfigs(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. +func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/oapi" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation.
+func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/deploymentconfig.go b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/deploymentconfig.go new file mode 100644 index 000000000000..1d4f7bba614e --- /dev/null +++ b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/deploymentconfig.go @@ -0,0 +1,149 @@ +package v1 + +import ( + v1 "github.com/openshift/origin/pkg/deploy/api/v1" + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// DeploymentConfigsGetter has a method to return a DeploymentConfigInterface. +// A group's client should implement this interface. +type DeploymentConfigsGetter interface { + DeploymentConfigs(namespace string) DeploymentConfigInterface +} + +// DeploymentConfigInterface has methods to work with DeploymentConfig resources. +type DeploymentConfigInterface interface { + Create(*v1.DeploymentConfig) (*v1.DeploymentConfig, error) + Update(*v1.DeploymentConfig) (*v1.DeploymentConfig, error) + UpdateStatus(*v1.DeploymentConfig) (*v1.DeploymentConfig, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.DeploymentConfig, error) + List(opts api.ListOptions) (*v1.DeploymentConfigList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.DeploymentConfig, err error) + DeploymentConfigExpansion +} + +// deploymentConfigs implements DeploymentConfigInterface +type deploymentConfigs struct { + client *CoreClient + ns string +} + +// newDeploymentConfigs returns a DeploymentConfigs +func newDeploymentConfigs(c *CoreClient, namespace string) *deploymentConfigs { + return &deploymentConfigs{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a deploymentConfig and creates it. Returns the server's representation of the deploymentConfig, and an error, if there is any. +func (c *deploymentConfigs) Create(deploymentConfig *v1.DeploymentConfig) (result *v1.DeploymentConfig, err error) { + result = &v1.DeploymentConfig{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deploymentconfigs"). + Body(deploymentConfig). + Do(). + Into(result) + return +} + +// Update takes the representation of a deploymentConfig and updates it. Returns the server's representation of the deploymentConfig, and an error, if there is any. +func (c *deploymentConfigs) Update(deploymentConfig *v1.DeploymentConfig) (result *v1.DeploymentConfig, err error) { + result = &v1.DeploymentConfig{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deploymentconfigs"). + Name(deploymentConfig.Name). + Body(deploymentConfig). + Do(). + Into(result) + return +} + +func (c *deploymentConfigs) UpdateStatus(deploymentConfig *v1.DeploymentConfig) (result *v1.DeploymentConfig, err error) { + result = &v1.DeploymentConfig{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deploymentconfigs"). + Name(deploymentConfig.Name). + SubResource("status"). + Body(deploymentConfig). + Do(). + Into(result) + return +} + +// Delete takes name of the deploymentConfig and deletes it. Returns an error if one occurs. +func (c *deploymentConfigs) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deploymentconfigs"). 
+ Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deploymentConfigs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deploymentconfigs"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the deploymentConfig, and returns the corresponding deploymentConfig object, and an error if there is any. +func (c *deploymentConfigs) Get(name string) (result *v1.DeploymentConfig, err error) { + result = &v1.DeploymentConfig{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deploymentconfigs"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DeploymentConfigs that match those selectors. +func (c *deploymentConfigs) List(opts api.ListOptions) (result *v1.DeploymentConfigList, err error) { + result = &v1.DeploymentConfigList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deploymentconfigs"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deploymentConfigs. +func (c *deploymentConfigs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("deploymentconfigs"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched deploymentConfig. +func (c *deploymentConfigs) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.DeploymentConfig, err error) { + result = &v1.DeploymentConfig{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deploymentconfigs"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/doc.go b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/doc.go new file mode 100644 index 000000000000..96e1d6985525 --- /dev/null +++ b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/deploy/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/deploy/api --output-base=../../.. + +// This package has the automatically generated typed clients. +package v1 diff --git a/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go new file mode 100644 index 000000000000..7da60fe6af58 --- /dev/null +++ b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/deploy/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/deploy/api --output-base=../../.. + +// Package fake has the automatically generated clients. 
+package fake diff --git a/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 000000000000..5f7e75647ac5 --- /dev/null +++ b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,21 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) DeploymentConfigs(namespace string) v1.DeploymentConfigInterface { + return &FakeDeploymentConfigs{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_deploymentconfig.go b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_deploymentconfig.go new file mode 100644 index 000000000000..e5411f90d9bd --- /dev/null +++ b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_deploymentconfig.go @@ -0,0 +1,111 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/deploy/api/v1" + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeDeploymentConfigs implements DeploymentConfigInterface +type FakeDeploymentConfigs struct { + Fake *FakeCore + ns string +} + +var deploymentconfigsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "deploymentconfigs"} + +func (c *FakeDeploymentConfigs) Create(deploymentConfig *v1.DeploymentConfig) (result *v1.DeploymentConfig, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(deploymentconfigsResource, c.ns, deploymentConfig), &v1.DeploymentConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.DeploymentConfig), err +} + +func (c *FakeDeploymentConfigs) Update(deploymentConfig *v1.DeploymentConfig) (result *v1.DeploymentConfig, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(deploymentconfigsResource, c.ns, deploymentConfig), &v1.DeploymentConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.DeploymentConfig), err +} + +func (c *FakeDeploymentConfigs) UpdateStatus(deploymentConfig *v1.DeploymentConfig) (*v1.DeploymentConfig, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(deploymentconfigsResource, "status", c.ns, deploymentConfig), &v1.DeploymentConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.DeploymentConfig), err +} + +func (c *FakeDeploymentConfigs) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(core.NewDeleteAction(deploymentconfigsResource, c.ns, name), &v1.DeploymentConfig{}) + + return err +} + +func (c *FakeDeploymentConfigs) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(deploymentconfigsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.DeploymentConfigList{}) + return err +} + +func (c *FakeDeploymentConfigs) Get(name string) (result *v1.DeploymentConfig, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(deploymentconfigsResource, c.ns, name), &v1.DeploymentConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.DeploymentConfig), err +} + +func (c *FakeDeploymentConfigs) List(opts api.ListOptions) (result *v1.DeploymentConfigList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(deploymentconfigsResource, c.ns, opts), &v1.DeploymentConfigList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.DeploymentConfigList{} + for _, item := range obj.(*v1.DeploymentConfigList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested deploymentConfigs. +func (c *FakeDeploymentConfigs) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(deploymentconfigsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched deploymentConfig. +func (c *FakeDeploymentConfigs) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.DeploymentConfig, err error) { + obj, err := c.Fake. + Invokes(core.NewPatchSubresourceAction(deploymentconfigsResource, c.ns, name, data, subresources...), &v1.DeploymentConfig{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.DeploymentConfig), err +} diff --git a/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000000..113ade54be42 --- /dev/null +++ b/pkg/deploy/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go @@ -0,0 +1,3 @@ +package v1 + +type DeploymentConfigExpansion interface{} diff --git a/pkg/image/client/clientset_generated/release_v1_4/clientset.go b/pkg/image/client/clientset_generated/release_v1_4/clientset.go new file mode 100644 index 000000000000..ee4c884abc0d --- /dev/null +++ b/pkg/image/client/clientset_generated/release_v1_4/clientset.go @@ -0,0 +1,74 @@ +package release_v1_4 + +import ( + "github.com/golang/glog" + v1core "github.com/openshift/origin/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Core() v1core.CoreInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. 
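Another illustrative aside (placeholder names, same registration assumption as the build example): FakeDeploymentConfigs.List filters the tracked objects by the label selector carried in api.ListOptions, and a sketch of exercising that behaviour with the deploy fake looks like this.

package fake_test // hypothetical test package

import (
	deployv1 "github.com/openshift/origin/pkg/deploy/api/v1"
	"github.com/openshift/origin/pkg/deploy/client/clientset_generated/release_v1_4/fake"
	kapi "k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/labels"
)

// listFrontendConfigs returns only the tracked DeploymentConfigs whose labels
// match app=frontend, mirroring the filtering done in FakeDeploymentConfigs.List.
func listFrontendConfigs() (*deployv1.DeploymentConfigList, error) {
	clientset := fake.NewSimpleClientset()

	dc := &deployv1.DeploymentConfig{}
	dc.Name = "frontend"
	dc.Labels = map[string]string{"app": "frontend"}
	if _, err := clientset.Core().DeploymentConfigs("demo").Create(dc); err != nil {
		return nil, err
	}

	opts := kapi.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{"app": "frontend"})}
	return clientset.Core().DeploymentConfigs("demo").List(opts)
}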
+type Clientset struct { + *discovery.DiscoveryClient + *v1core.CoreClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.CoreClient, err = v1core.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &clientset, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. +func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/pkg/image/client/clientset_generated/release_v1_4/doc.go b/pkg/image/client/clientset_generated/release_v1_4/doc.go new file mode 100644 index 000000000000..fdb63ce047ff --- /dev/null +++ b/pkg/image/client/clientset_generated/release_v1_4/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/image/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/image/api --output-base=../../.. + +// This package has the automatically generated clientset. +package release_v1_4 diff --git a/pkg/image/client/clientset_generated/release_v1_4/fake/clientset_generated.go b/pkg/image/client/clientset_generated/release_v1_4/fake/clientset_generated.go new file mode 100644 index 000000000000..29ec1bdd3b8c --- /dev/null +++ b/pkg/image/client/clientset_generated/release_v1_4/fake/clientset_generated.go @@ -0,0 +1,52 @@ +package fake + +import ( + clientset "github.com/openshift/origin/pkg/image/client/clientset_generated/release_v1_4" + v1core "github.com/openshift/origin/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1" + fakev1core "github.com/openshift/origin/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/fake" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/discovery" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. 
+// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := core.NewObjectTracker(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := core.Fake{} + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) + + fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + core.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ clientset.Interface = &Clientset{} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + return &fakev1core.FakeCore{Fake: &c.Fake} +} diff --git a/pkg/image/client/clientset_generated/release_v1_4/fake/doc.go b/pkg/image/client/clientset_generated/release_v1_4/fake/doc.go new file mode 100644 index 000000000000..caf87f8d9a6c --- /dev/null +++ b/pkg/image/client/clientset_generated/release_v1_4/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/image/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/image/api --output-base=../../.. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go new file mode 100644 index 000000000000..cd23fa89afb1 --- /dev/null +++ b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go @@ -0,0 +1,80 @@ +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" + serializer "k8s.io/kubernetes/pkg/runtime/serializer" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + ImagesGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) Images(namespace string) ImageInterface { + return newImages(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. 
+func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/oapi" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/doc.go b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/doc.go new file mode 100644 index 000000000000..08474908dd84 --- /dev/null +++ b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/image/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/image/api --output-base=../../.. + +// This package has the automatically generated typed clients. +package v1 diff --git a/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go new file mode 100644 index 000000000000..87505baa8927 --- /dev/null +++ b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/image/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/image/api --output-base=../../.. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 000000000000..91bedf75fd8d --- /dev/null +++ b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,21 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) Images(namespace string) v1.ImageInterface { + return &FakeImages{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation.
+func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_image.go b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_image.go new file mode 100644 index 000000000000..8d78fbbb7976 --- /dev/null +++ b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_image.go @@ -0,0 +1,101 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/image/api/v1" + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeImages implements ImageInterface +type FakeImages struct { + Fake *FakeCore + ns string +} + +var imagesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "images"} + +func (c *FakeImages) Create(image *v1.Image) (result *v1.Image, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(imagesResource, c.ns, image), &v1.Image{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Image), err +} + +func (c *FakeImages) Update(image *v1.Image) (result *v1.Image, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(imagesResource, c.ns, image), &v1.Image{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Image), err +} + +func (c *FakeImages) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(imagesResource, c.ns, name), &v1.Image{}) + + return err +} + +func (c *FakeImages) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(imagesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ImageList{}) + return err +} + +func (c *FakeImages) Get(name string) (result *v1.Image, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(imagesResource, c.ns, name), &v1.Image{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Image), err +} + +func (c *FakeImages) List(opts api.ListOptions) (result *v1.ImageList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(imagesResource, c.ns, opts), &v1.ImageList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ImageList{} + for _, item := range obj.(*v1.ImageList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested images. +func (c *FakeImages) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(imagesResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched image. +func (c *FakeImages) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Image, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewPatchSubresourceAction(imagesResource, c.ns, name, data, subresources...), &v1.Image{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Image), err +} diff --git a/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000000..7ca89732cdcf --- /dev/null +++ b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go @@ -0,0 +1,3 @@ +package v1 + +type ImageExpansion interface{} diff --git a/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/image.go b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/image.go new file mode 100644 index 000000000000..542cadccac3f --- /dev/null +++ b/pkg/image/client/clientset_generated/release_v1_4/typed/core/v1/image.go @@ -0,0 +1,135 @@ +package v1 + +import ( + v1 "github.com/openshift/origin/pkg/image/api/v1" + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ImagesGetter has a method to return a ImageInterface. +// A group's client should implement this interface. +type ImagesGetter interface { + Images(namespace string) ImageInterface +} + +// ImageInterface has methods to work with Image resources. +type ImageInterface interface { + Create(*v1.Image) (*v1.Image, error) + Update(*v1.Image) (*v1.Image, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Image, error) + List(opts api.ListOptions) (*v1.ImageList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Image, err error) + ImageExpansion +} + +// images implements ImageInterface +type images struct { + client *CoreClient + ns string +} + +// newImages returns a Images +func newImages(c *CoreClient, namespace string) *images { + return &images{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a image and creates it. Returns the server's representation of the image, and an error, if there is any. +func (c *images) Create(image *v1.Image) (result *v1.Image, err error) { + result = &v1.Image{} + err = c.client.Post(). + Namespace(c.ns). + Resource("images"). + Body(image). + Do(). + Into(result) + return +} + +// Update takes the representation of a image and updates it. Returns the server's representation of the image, and an error, if there is any. +func (c *images) Update(image *v1.Image) (result *v1.Image, err error) { + result = &v1.Image{} + err = c.client.Put(). + Namespace(c.ns). + Resource("images"). + Name(image.Name). + Body(image). + Do(). + Into(result) + return +} + +// Delete takes name of the image and deletes it. Returns an error if one occurs. +func (c *images) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("images"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *images) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("images"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the image, and returns the corresponding image object, and an error if there is any. 
+func (c *images) Get(name string) (result *v1.Image, err error) { + result = &v1.Image{} + err = c.client.Get(). + Namespace(c.ns). + Resource("images"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Images that match those selectors. +func (c *images) List(opts api.ListOptions) (result *v1.ImageList, err error) { + result = &v1.ImageList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("images"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested images. +func (c *images) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("images"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched image. +func (c *images) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Image, err error) { + result = &v1.Image{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("images"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/oauth/client/clientset_generated/release_v1_4/clientset.go b/pkg/oauth/client/clientset_generated/release_v1_4/clientset.go new file mode 100644 index 000000000000..839d380671f1 --- /dev/null +++ b/pkg/oauth/client/clientset_generated/release_v1_4/clientset.go @@ -0,0 +1,74 @@ +package release_v1_4 + +import ( + "github.com/golang/glog" + v1core "github.com/openshift/origin/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Core() v1core.CoreInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *v1core.CoreClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.CoreClient, err = v1core.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &clientset, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. +func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/pkg/oauth/client/clientset_generated/release_v1_4/doc.go b/pkg/oauth/client/clientset_generated/release_v1_4/doc.go new file mode 100644 index 000000000000..e443e66336cf --- /dev/null +++ b/pkg/oauth/client/clientset_generated/release_v1_4/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/oauth/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/oauth/api --output-base=../../.. + +// This package has the automatically generated clientset. +package release_v1_4 diff --git a/pkg/oauth/client/clientset_generated/release_v1_4/fake/clientset_generated.go b/pkg/oauth/client/clientset_generated/release_v1_4/fake/clientset_generated.go new file mode 100644 index 000000000000..84f376271fd8 --- /dev/null +++ b/pkg/oauth/client/clientset_generated/release_v1_4/fake/clientset_generated.go @@ -0,0 +1,52 @@ +package fake + +import ( + clientset "github.com/openshift/origin/pkg/oauth/client/clientset_generated/release_v1_4" + v1core "github.com/openshift/origin/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1" + fakev1core "github.com/openshift/origin/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/fake" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/discovery" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := core.NewObjectTracker(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := core.Fake{} + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) + + fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + core.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ clientset.Interface = &Clientset{} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + return &fakev1core.FakeCore{Fake: &c.Fake} +} diff --git a/pkg/oauth/client/clientset_generated/release_v1_4/fake/doc.go b/pkg/oauth/client/clientset_generated/release_v1_4/fake/doc.go new file mode 100644 index 000000000000..edb272bf8d24 --- /dev/null +++ b/pkg/oauth/client/clientset_generated/release_v1_4/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/oauth/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/oauth/api --output-base=../../.. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go new file mode 100644 index 000000000000..738349dfa23c --- /dev/null +++ b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go @@ -0,0 +1,80 @@ +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" + serializer "k8s.io/kubernetes/pkg/runtime/serializer" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + OAuthClientsGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) OAuthClients(namespace string) OAuthClientInterface { + return newOAuthClients(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. +func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/oapi" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/doc.go b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/doc.go new file mode 100644 index 000000000000..51c908e0552f --- /dev/null +++ b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/oauth/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/oauth/api --output-base=../../.. + +// This package has the automatically generated typed clients. +package v1 diff --git a/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go new file mode 100644 index 000000000000..e1ea52d09b44 --- /dev/null +++ b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/oauth/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/oauth/api --output-base=../../.. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 000000000000..9d2e5b1d0e35 --- /dev/null +++ b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,21 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) OAuthClients(namespace string) v1.OAuthClientInterface { + return &FakeOAuthClients{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_oauthclient.go b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_oauthclient.go new file mode 100644 index 000000000000..e47d41045909 --- /dev/null +++ b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_oauthclient.go @@ -0,0 +1,101 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/oauth/api/v1" + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeOAuthClients implements OAuthClientInterface +type FakeOAuthClients struct { + Fake *FakeCore + ns string +} + +var oauthclientsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "oauthclients"} + +func (c *FakeOAuthClients) Create(oAuthClient *v1.OAuthClient) (result *v1.OAuthClient, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(oauthclientsResource, c.ns, oAuthClient), &v1.OAuthClient{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.OAuthClient), err +} + +func (c *FakeOAuthClients) Update(oAuthClient *v1.OAuthClient) (result *v1.OAuthClient, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(oauthclientsResource, c.ns, oAuthClient), &v1.OAuthClient{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.OAuthClient), err +} + +func (c *FakeOAuthClients) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(oauthclientsResource, c.ns, name), &v1.OAuthClient{}) + + return err +} + +func (c *FakeOAuthClients) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(oauthclientsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.OAuthClientList{}) + return err +} + +func (c *FakeOAuthClients) Get(name string) (result *v1.OAuthClient, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(oauthclientsResource, c.ns, name), &v1.OAuthClient{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.OAuthClient), err +} + +func (c *FakeOAuthClients) List(opts api.ListOptions) (result *v1.OAuthClientList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(oauthclientsResource, c.ns, opts), &v1.OAuthClientList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.OAuthClientList{} + for _, item := range obj.(*v1.OAuthClientList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested oAuthClients. +func (c *FakeOAuthClients) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(oauthclientsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched oAuthClient. +func (c *FakeOAuthClients) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.OAuthClient, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewPatchSubresourceAction(oauthclientsResource, c.ns, name, data, subresources...), &v1.OAuthClient{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.OAuthClient), err +} diff --git a/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000000..43ce2a165607 --- /dev/null +++ b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go @@ -0,0 +1,3 @@ +package v1 + +type OAuthClientExpansion interface{} diff --git a/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/oauthclient.go b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/oauthclient.go new file mode 100644 index 000000000000..bde9b8aa435a --- /dev/null +++ b/pkg/oauth/client/clientset_generated/release_v1_4/typed/core/v1/oauthclient.go @@ -0,0 +1,135 @@ +package v1 + +import ( + v1 "github.com/openshift/origin/pkg/oauth/api/v1" + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// OAuthClientsGetter has a method to return a OAuthClientInterface. +// A group's client should implement this interface. +type OAuthClientsGetter interface { + OAuthClients(namespace string) OAuthClientInterface +} + +// OAuthClientInterface has methods to work with OAuthClient resources. +type OAuthClientInterface interface { + Create(*v1.OAuthClient) (*v1.OAuthClient, error) + Update(*v1.OAuthClient) (*v1.OAuthClient, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.OAuthClient, error) + List(opts api.ListOptions) (*v1.OAuthClientList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.OAuthClient, err error) + OAuthClientExpansion +} + +// oAuthClients implements OAuthClientInterface +type oAuthClients struct { + client *CoreClient + ns string +} + +// newOAuthClients returns a OAuthClients +func newOAuthClients(c *CoreClient, namespace string) *oAuthClients { + return &oAuthClients{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a oAuthClient and creates it. Returns the server's representation of the oAuthClient, and an error, if there is any. +func (c *oAuthClients) Create(oAuthClient *v1.OAuthClient) (result *v1.OAuthClient, err error) { + result = &v1.OAuthClient{} + err = c.client.Post(). + Namespace(c.ns). + Resource("oauthclients"). + Body(oAuthClient). + Do(). + Into(result) + return +} + +// Update takes the representation of a oAuthClient and updates it. Returns the server's representation of the oAuthClient, and an error, if there is any. +func (c *oAuthClients) Update(oAuthClient *v1.OAuthClient) (result *v1.OAuthClient, err error) { + result = &v1.OAuthClient{} + err = c.client.Put(). + Namespace(c.ns). + Resource("oauthclients"). + Name(oAuthClient.Name). + Body(oAuthClient). + Do(). + Into(result) + return +} + +// Delete takes name of the oAuthClient and deletes it. Returns an error if one occurs. +func (c *oAuthClients) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("oauthclients"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *oAuthClients) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("oauthclients"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the oAuthClient, and returns the corresponding oAuthClient object, and an error if there is any. +func (c *oAuthClients) Get(name string) (result *v1.OAuthClient, err error) { + result = &v1.OAuthClient{} + err = c.client.Get(). + Namespace(c.ns). + Resource("oauthclients"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of OAuthClients that match those selectors. +func (c *oAuthClients) List(opts api.ListOptions) (result *v1.OAuthClientList, err error) { + result = &v1.OAuthClientList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("oauthclients"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested oAuthClients. +func (c *oAuthClients) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("oauthclients"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched oAuthClient. +func (c *oAuthClients) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.OAuthClient, err error) { + result = &v1.OAuthClient{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("oauthclients"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/project/client/clientset_generated/release_v1_4/clientset.go b/pkg/project/client/clientset_generated/release_v1_4/clientset.go new file mode 100644 index 000000000000..7ffa461eb312 --- /dev/null +++ b/pkg/project/client/clientset_generated/release_v1_4/clientset.go @@ -0,0 +1,74 @@ +package release_v1_4 + +import ( + "github.com/golang/glog" + v1core "github.com/openshift/origin/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Core() v1core.CoreInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *v1core.CoreClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. 
+func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.CoreClient, err = v1core.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &clientset, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. +func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/pkg/project/client/clientset_generated/release_v1_4/doc.go b/pkg/project/client/clientset_generated/release_v1_4/doc.go new file mode 100644 index 000000000000..315e65913673 --- /dev/null +++ b/pkg/project/client/clientset_generated/release_v1_4/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/project/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/project/api --output-base=../../.. + +// This package has the automatically generated clientset. +package release_v1_4 diff --git a/pkg/project/client/clientset_generated/release_v1_4/fake/clientset_generated.go b/pkg/project/client/clientset_generated/release_v1_4/fake/clientset_generated.go new file mode 100644 index 000000000000..c5b6ddc88625 --- /dev/null +++ b/pkg/project/client/clientset_generated/release_v1_4/fake/clientset_generated.go @@ -0,0 +1,52 @@ +package fake + +import ( + clientset "github.com/openshift/origin/pkg/project/client/clientset_generated/release_v1_4" + v1core "github.com/openshift/origin/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1" + fakev1core "github.com/openshift/origin/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/fake" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/discovery" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
+func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := core.NewObjectTracker(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := core.Fake{} + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) + + fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + core.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ clientset.Interface = &Clientset{} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + return &fakev1core.FakeCore{Fake: &c.Fake} +} diff --git a/pkg/project/client/clientset_generated/release_v1_4/fake/doc.go b/pkg/project/client/clientset_generated/release_v1_4/fake/doc.go new file mode 100644 index 000000000000..b67595e807db --- /dev/null +++ b/pkg/project/client/clientset_generated/release_v1_4/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/project/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/project/api --output-base=../../.. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go new file mode 100644 index 000000000000..18780a080b6c --- /dev/null +++ b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go @@ -0,0 +1,80 @@ +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" + serializer "k8s.io/kubernetes/pkg/runtime/serializer" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + ProjectsGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) Projects() ProjectInterface { + return newProjects(c) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. 
+func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/oapi" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/doc.go b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/doc.go new file mode 100644 index 000000000000..48b79b20f172 --- /dev/null +++ b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/project/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/project/api --output-base=../../.. + +// This package has the automatically generated typed clients. +package v1 diff --git a/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go new file mode 100644 index 000000000000..7aa911c7383e --- /dev/null +++ b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/project/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/project/api --output-base=../../.. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 000000000000..feb32efbdc5f --- /dev/null +++ b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,21 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) Projects() v1.ProjectInterface { + return &FakeProjects{c} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_project.go b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_project.go new file mode 100644 index 000000000000..004c7002fd06 --- /dev/null +++ b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_project.go @@ -0,0 +1,102 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/project/api/v1" + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeProjects implements ProjectInterface +type FakeProjects struct { + Fake *FakeCore +} + +var projectsResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "projects"} + +func (c *FakeProjects) Create(project *v1.Project) (result *v1.Project, err error) { + obj, err := c.Fake. + Invokes(core.NewRootCreateAction(projectsResource, project), &v1.Project{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Project), err +} + +func (c *FakeProjects) Update(project *v1.Project) (result *v1.Project, err error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateAction(projectsResource, project), &v1.Project{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Project), err +} + +func (c *FakeProjects) UpdateStatus(project *v1.Project) (*v1.Project, error) { + obj, err := c.Fake. + Invokes(core.NewRootUpdateSubresourceAction(projectsResource, "status", project), &v1.Project{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Project), err +} + +func (c *FakeProjects) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewRootDeleteAction(projectsResource, name), &v1.Project{}) + return err +} + +func (c *FakeProjects) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewRootDeleteCollectionAction(projectsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ProjectList{}) + return err +} + +func (c *FakeProjects) Get(name string) (result *v1.Project, err error) { + obj, err := c.Fake. + Invokes(core.NewRootGetAction(projectsResource, name), &v1.Project{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Project), err +} + +func (c *FakeProjects) List(opts api.ListOptions) (result *v1.ProjectList, err error) { + obj, err := c.Fake. + Invokes(core.NewRootListAction(projectsResource, opts), &v1.ProjectList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ProjectList{} + for _, item := range obj.(*v1.ProjectList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested projects. +func (c *FakeProjects) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewRootWatchAction(projectsResource, opts)) +} + +// Patch applies the patch and returns the patched project. +func (c *FakeProjects) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Project, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewRootPatchSubresourceAction(projectsResource, name, data, subresources...), &v1.Project{}) + if obj == nil { + return nil, err + } + return obj.(*v1.Project), err +} diff --git a/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000000..45713c0f7f13 --- /dev/null +++ b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go @@ -0,0 +1,3 @@ +package v1 + +type ProjectExpansion interface{} diff --git a/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/project.go b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/project.go new file mode 100644 index 000000000000..184f1ec86b82 --- /dev/null +++ b/pkg/project/client/clientset_generated/release_v1_4/typed/core/v1/project.go @@ -0,0 +1,138 @@ +package v1 + +import ( + v1 "github.com/openshift/origin/pkg/project/api/v1" + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ProjectsGetter has a method to return a ProjectInterface. +// A group's client should implement this interface. +type ProjectsGetter interface { + Projects() ProjectInterface +} + +// ProjectInterface has methods to work with Project resources. +type ProjectInterface interface { + Create(*v1.Project) (*v1.Project, error) + Update(*v1.Project) (*v1.Project, error) + UpdateStatus(*v1.Project) (*v1.Project, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Project, error) + List(opts api.ListOptions) (*v1.ProjectList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Project, err error) + ProjectExpansion +} + +// projects implements ProjectInterface +type projects struct { + client *CoreClient +} + +// newProjects returns a Projects +func newProjects(c *CoreClient) *projects { + return &projects{ + client: c, + } +} + +// Create takes the representation of a project and creates it. Returns the server's representation of the project, and an error, if there is any. +func (c *projects) Create(project *v1.Project) (result *v1.Project, err error) { + result = &v1.Project{} + err = c.client.Post(). + Resource("projects"). + Body(project). + Do(). + Into(result) + return +} + +// Update takes the representation of a project and updates it. Returns the server's representation of the project, and an error, if there is any. +func (c *projects) Update(project *v1.Project) (result *v1.Project, err error) { + result = &v1.Project{} + err = c.client.Put(). + Resource("projects"). + Name(project.Name). + Body(project). + Do(). + Into(result) + return +} + +func (c *projects) UpdateStatus(project *v1.Project) (result *v1.Project, err error) { + result = &v1.Project{} + err = c.client.Put(). + Resource("projects"). + Name(project.Name). + SubResource("status"). + Body(project). + Do(). + Into(result) + return +} + +// Delete takes name of the project and deletes it. Returns an error if one occurs. +func (c *projects) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("projects"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *projects) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("projects"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the project, and returns the corresponding project object, and an error if there is any. +func (c *projects) Get(name string) (result *v1.Project, err error) { + result = &v1.Project{} + err = c.client.Get(). + Resource("projects"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Projects that match those selectors. +func (c *projects) List(opts api.ListOptions) (result *v1.ProjectList, err error) { + result = &v1.ProjectList{} + err = c.client.Get(). + Resource("projects"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested projects. +func (c *projects) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("projects"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched project. +func (c *projects) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Project, err error) { + result = &v1.Project{} + err = c.client.Patch(pt). + Resource("projects"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/route/client/clientset_generated/release_v1_4/clientset.go b/pkg/route/client/clientset_generated/release_v1_4/clientset.go new file mode 100644 index 000000000000..9dc98609ca22 --- /dev/null +++ b/pkg/route/client/clientset_generated/release_v1_4/clientset.go @@ -0,0 +1,74 @@ +package release_v1_4 + +import ( + "github.com/golang/glog" + v1core "github.com/openshift/origin/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Core() v1core.CoreInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *v1core.CoreClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. 
+func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.CoreClient, err = v1core.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &clientset, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. +func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/pkg/route/client/clientset_generated/release_v1_4/doc.go b/pkg/route/client/clientset_generated/release_v1_4/doc.go new file mode 100644 index 000000000000..6032e0e10b04 --- /dev/null +++ b/pkg/route/client/clientset_generated/release_v1_4/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/route/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/route/api --output-base=../../.. + +// This package has the automatically generated clientset. +package release_v1_4 diff --git a/pkg/route/client/clientset_generated/release_v1_4/fake/clientset_generated.go b/pkg/route/client/clientset_generated/release_v1_4/fake/clientset_generated.go new file mode 100644 index 000000000000..ed59dab7f2a6 --- /dev/null +++ b/pkg/route/client/clientset_generated/release_v1_4/fake/clientset_generated.go @@ -0,0 +1,52 @@ +package fake + +import ( + clientset "github.com/openshift/origin/pkg/route/client/clientset_generated/release_v1_4" + v1core "github.com/openshift/origin/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1" + fakev1core "github.com/openshift/origin/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/fake" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/discovery" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
+func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := core.NewObjectTracker(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := core.Fake{} + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) + + fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + core.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ clientset.Interface = &Clientset{} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + return &fakev1core.FakeCore{Fake: &c.Fake} +} diff --git a/pkg/route/client/clientset_generated/release_v1_4/fake/doc.go b/pkg/route/client/clientset_generated/release_v1_4/fake/doc.go new file mode 100644 index 000000000000..3025e84d5aa5 --- /dev/null +++ b/pkg/route/client/clientset_generated/release_v1_4/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/route/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/route/api --output-base=../../.. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go new file mode 100644 index 000000000000..413f189e5a20 --- /dev/null +++ b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go @@ -0,0 +1,80 @@ +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" + serializer "k8s.io/kubernetes/pkg/runtime/serializer" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + RoutesGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) Routes(namespace string) RouteInterface { + return newRoutes(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. 
+func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/oapi" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/doc.go b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/doc.go new file mode 100644 index 000000000000..15d7a9e9a2e9 --- /dev/null +++ b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/route/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/route/api --output-base=../../.. + +// This package has the automatically generated typed clients. +package v1 diff --git a/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go new file mode 100644 index 000000000000..434761a683e0 --- /dev/null +++ b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/route/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/route/api --output-base=../../.. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 000000000000..d2d803f64b7d --- /dev/null +++ b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,21 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) Routes(namespace string) v1.RouteInterface { + return &FakeRoutes{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_route.go b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_route.go new file mode 100644 index 000000000000..36f5e3913c3d --- /dev/null +++ b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_route.go @@ -0,0 +1,111 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/route/api/v1" + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeRoutes implements RouteInterface +type FakeRoutes struct { + Fake *FakeCore + ns string +} + +var routesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "routes"} + +func (c *FakeRoutes) Create(route *v1.Route) (result *v1.Route, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(routesResource, c.ns, route), &v1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Route), err +} + +func (c *FakeRoutes) Update(route *v1.Route) (result *v1.Route, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(routesResource, c.ns, route), &v1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Route), err +} + +func (c *FakeRoutes) UpdateStatus(route *v1.Route) (*v1.Route, error) { + obj, err := c.Fake. + Invokes(core.NewUpdateSubresourceAction(routesResource, "status", c.ns, route), &v1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Route), err +} + +func (c *FakeRoutes) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(routesResource, c.ns, name), &v1.Route{}) + + return err +} + +func (c *FakeRoutes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(routesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.RouteList{}) + return err +} + +func (c *FakeRoutes) Get(name string) (result *v1.Route, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(routesResource, c.ns, name), &v1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Route), err +} + +func (c *FakeRoutes) List(opts api.ListOptions) (result *v1.RouteList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(routesResource, c.ns, opts), &v1.RouteList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.RouteList{} + for _, item := range obj.(*v1.RouteList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested routes. +func (c *FakeRoutes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(routesResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched route. +func (c *FakeRoutes) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Route, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewPatchSubresourceAction(routesResource, c.ns, name, data, subresources...), &v1.Route{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Route), err +} diff --git a/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000000..564f1c268d11 --- /dev/null +++ b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go @@ -0,0 +1,3 @@ +package v1 + +type RouteExpansion interface{} diff --git a/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/route.go b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/route.go new file mode 100644 index 000000000000..a499a5d3fca9 --- /dev/null +++ b/pkg/route/client/clientset_generated/release_v1_4/typed/core/v1/route.go @@ -0,0 +1,149 @@ +package v1 + +import ( + v1 "github.com/openshift/origin/pkg/route/api/v1" + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// RoutesGetter has a method to return a RouteInterface. +// A group's client should implement this interface. +type RoutesGetter interface { + Routes(namespace string) RouteInterface +} + +// RouteInterface has methods to work with Route resources. +type RouteInterface interface { + Create(*v1.Route) (*v1.Route, error) + Update(*v1.Route) (*v1.Route, error) + UpdateStatus(*v1.Route) (*v1.Route, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Route, error) + List(opts api.ListOptions) (*v1.RouteList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Route, err error) + RouteExpansion +} + +// routes implements RouteInterface +type routes struct { + client *CoreClient + ns string +} + +// newRoutes returns a Routes +func newRoutes(c *CoreClient, namespace string) *routes { + return &routes{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a route and creates it. Returns the server's representation of the route, and an error, if there is any. +func (c *routes) Create(route *v1.Route) (result *v1.Route, err error) { + result = &v1.Route{} + err = c.client.Post(). + Namespace(c.ns). + Resource("routes"). + Body(route). + Do(). + Into(result) + return +} + +// Update takes the representation of a route and updates it. Returns the server's representation of the route, and an error, if there is any. +func (c *routes) Update(route *v1.Route) (result *v1.Route, err error) { + result = &v1.Route{} + err = c.client.Put(). + Namespace(c.ns). + Resource("routes"). + Name(route.Name). + Body(route). + Do(). + Into(result) + return +} + +func (c *routes) UpdateStatus(route *v1.Route) (result *v1.Route, err error) { + result = &v1.Route{} + err = c.client.Put(). + Namespace(c.ns). + Resource("routes"). + Name(route.Name). + SubResource("status"). + Body(route). + Do(). + Into(result) + return +} + +// Delete takes name of the route and deletes it. Returns an error if one occurs. +func (c *routes) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("routes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *routes) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("routes"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the route, and returns the corresponding route object, and an error if there is any. +func (c *routes) Get(name string) (result *v1.Route, err error) { + result = &v1.Route{} + err = c.client.Get(). + Namespace(c.ns). + Resource("routes"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Routes that match those selectors. +func (c *routes) List(opts api.ListOptions) (result *v1.RouteList, err error) { + result = &v1.RouteList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("routes"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested routes. +func (c *routes) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("routes"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched route. +func (c *routes) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Route, err error) { + result = &v1.Route{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("routes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/sdn/client/clientset_generated/release_v1_4/clientset.go b/pkg/sdn/client/clientset_generated/release_v1_4/clientset.go new file mode 100644 index 000000000000..788fba2c311b --- /dev/null +++ b/pkg/sdn/client/clientset_generated/release_v1_4/clientset.go @@ -0,0 +1,74 @@ +package release_v1_4 + +import ( + "github.com/golang/glog" + v1core "github.com/openshift/origin/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Core() v1core.CoreInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *v1core.CoreClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. 
+func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.CoreClient, err = v1core.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &clientset, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. +func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/pkg/sdn/client/clientset_generated/release_v1_4/doc.go b/pkg/sdn/client/clientset_generated/release_v1_4/doc.go new file mode 100644 index 000000000000..e31d5bcb806c --- /dev/null +++ b/pkg/sdn/client/clientset_generated/release_v1_4/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/sdn/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/sdn/api --output-base=../../.. + +// This package has the automatically generated clientset. +package release_v1_4 diff --git a/pkg/sdn/client/clientset_generated/release_v1_4/fake/clientset_generated.go b/pkg/sdn/client/clientset_generated/release_v1_4/fake/clientset_generated.go new file mode 100644 index 000000000000..c6ceabce32d8 --- /dev/null +++ b/pkg/sdn/client/clientset_generated/release_v1_4/fake/clientset_generated.go @@ -0,0 +1,52 @@ +package fake + +import ( + clientset "github.com/openshift/origin/pkg/sdn/client/clientset_generated/release_v1_4" + v1core "github.com/openshift/origin/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1" + fakev1core "github.com/openshift/origin/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/fake" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/discovery" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
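The constructor above only installs a token-bucket rate limiter when QPS is set and no RateLimiter was supplied, and NewForConfigOrDie trades the error return for a panic. A minimal sketch under those assumptions, with a placeholder host:

package main

import (
	sdnclient "github.com/openshift/origin/pkg/sdn/client/clientset_generated/release_v1_4"
	"k8s.io/kubernetes/pkg/api"
	_ "k8s.io/kubernetes/pkg/api/install" // assumed: registers the legacy ("") group used when defaulting the config
	"k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	cfg := &restclient.Config{
		Host:  "https://openshift.example.com:8443", // placeholder API server
		QPS:   10,                                   // with no RateLimiter set, this becomes a flowcontrol token bucket
		Burst: 20,
	}

	// Panics on a bad config; use NewForConfig when the error should be handled.
	cs := sdnclient.NewForConfigOrDie(cfg)

	// The Core() group exposes the ClusterNetworks client defined later in this patch.
	if _, err := cs.Core().ClusterNetworks("").List(api.ListOptions{}); err != nil {
		panic(err)
	}
}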
+func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := core.NewObjectTracker(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := core.Fake{} + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) + + fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + core.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ clientset.Interface = &Clientset{} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + return &fakev1core.FakeCore{Fake: &c.Fake} +} diff --git a/pkg/sdn/client/clientset_generated/release_v1_4/fake/doc.go b/pkg/sdn/client/clientset_generated/release_v1_4/fake/doc.go new file mode 100644 index 000000000000..49d1503cfc5b --- /dev/null +++ b/pkg/sdn/client/clientset_generated/release_v1_4/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/sdn/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/sdn/api --output-base=../../.. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/clusternetwork.go b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/clusternetwork.go new file mode 100644 index 000000000000..b1b5418384da --- /dev/null +++ b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/clusternetwork.go @@ -0,0 +1,135 @@ +package v1 + +import ( + v1 "github.com/openshift/origin/pkg/sdn/api/v1" + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// ClusterNetworksGetter has a method to return a ClusterNetworkInterface. +// A group's client should implement this interface. +type ClusterNetworksGetter interface { + ClusterNetworks(namespace string) ClusterNetworkInterface +} + +// ClusterNetworkInterface has methods to work with ClusterNetwork resources. +type ClusterNetworkInterface interface { + Create(*v1.ClusterNetwork) (*v1.ClusterNetwork, error) + Update(*v1.ClusterNetwork) (*v1.ClusterNetwork, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.ClusterNetwork, error) + List(opts api.ListOptions) (*v1.ClusterNetworkList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.ClusterNetwork, err error) + ClusterNetworkExpansion +} + +// clusterNetworks implements ClusterNetworkInterface +type clusterNetworks struct { + client *CoreClient + ns string +} + +// newClusterNetworks returns a ClusterNetworks +func newClusterNetworks(c *CoreClient, namespace string) *clusterNetworks { + return &clusterNetworks{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a clusterNetwork and creates it. 
Returns the server's representation of the clusterNetwork, and an error, if there is any. +func (c *clusterNetworks) Create(clusterNetwork *v1.ClusterNetwork) (result *v1.ClusterNetwork, err error) { + result = &v1.ClusterNetwork{} + err = c.client.Post(). + Namespace(c.ns). + Resource("clusternetworks"). + Body(clusterNetwork). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterNetwork and updates it. Returns the server's representation of the clusterNetwork, and an error, if there is any. +func (c *clusterNetworks) Update(clusterNetwork *v1.ClusterNetwork) (result *v1.ClusterNetwork, err error) { + result = &v1.ClusterNetwork{} + err = c.client.Put(). + Namespace(c.ns). + Resource("clusternetworks"). + Name(clusterNetwork.Name). + Body(clusterNetwork). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterNetwork and deletes it. Returns an error if one occurs. +func (c *clusterNetworks) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("clusternetworks"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterNetworks) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("clusternetworks"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterNetwork, and returns the corresponding clusterNetwork object, and an error if there is any. +func (c *clusterNetworks) Get(name string) (result *v1.ClusterNetwork, err error) { + result = &v1.ClusterNetwork{} + err = c.client.Get(). + Namespace(c.ns). + Resource("clusternetworks"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterNetworks that match those selectors. +func (c *clusterNetworks) List(opts api.ListOptions) (result *v1.ClusterNetworkList, err error) { + result = &v1.ClusterNetworkList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("clusternetworks"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterNetworks. +func (c *clusterNetworks) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("clusternetworks"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterNetwork. +func (c *clusterNetworks) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.ClusterNetwork, err error) { + result = &v1.ClusterNetwork{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("clusternetworks"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go new file mode 100644 index 000000000000..76c32821308b --- /dev/null +++ b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go @@ -0,0 +1,80 @@ +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" + serializer "k8s.io/kubernetes/pkg/runtime/serializer" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + ClusterNetworksGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) ClusterNetworks(namespace string) ClusterNetworkInterface { + return newClusterNetworks(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. +func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/oapi" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/doc.go b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/doc.go new file mode 100644 index 000000000000..166f75165095 --- /dev/null +++ b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/sdn/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/sdn/api --output-base=../../.. + +// This package has the automatically generated typed clients. 
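setConfigDefaults above is what points this typed client at the legacy /oapi endpoint and the un-grouped v1 GroupVersion, so callers only supply connection details. A hedged sketch of using the typed CoreClient directly; the host, token and install import are assumptions, not part of the patch.

package main

import (
	"fmt"

	sdnv1client "github.com/openshift/origin/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1"
	"k8s.io/kubernetes/pkg/api"
	_ "k8s.io/kubernetes/pkg/api/install" // assumed: makes registered.Group("") resolve in setConfigDefaults
	"k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	cfg := &restclient.Config{
		Host:        "https://openshift.example.com:8443", // placeholder
		BearerToken: "REDACTED",                           // placeholder credential
	}

	// NewForConfig fills in APIPath ("/oapi"), the GroupVersion and the
	// negotiated serializer via setConfigDefaults before building the RESTClient.
	core, err := sdnv1client.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// ClusterNetwork is effectively cluster-scoped, so an empty namespace is used here.
	nets, err := core.ClusterNetworks("").List(api.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("cluster networks:", len(nets.Items))
}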
+package v1 diff --git a/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go new file mode 100644 index 000000000000..30de8edf07d2 --- /dev/null +++ b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/sdn/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/sdn/api --output-base=../../.. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_clusternetwork.go b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_clusternetwork.go new file mode 100644 index 000000000000..56d97e0f8c9a --- /dev/null +++ b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_clusternetwork.go @@ -0,0 +1,101 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/sdn/api/v1" + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeClusterNetworks implements ClusterNetworkInterface +type FakeClusterNetworks struct { + Fake *FakeCore + ns string +} + +var clusternetworksResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "clusternetworks"} + +func (c *FakeClusterNetworks) Create(clusterNetwork *v1.ClusterNetwork) (result *v1.ClusterNetwork, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(clusternetworksResource, c.ns, clusterNetwork), &v1.ClusterNetwork{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ClusterNetwork), err +} + +func (c *FakeClusterNetworks) Update(clusterNetwork *v1.ClusterNetwork) (result *v1.ClusterNetwork, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(clusternetworksResource, c.ns, clusterNetwork), &v1.ClusterNetwork{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ClusterNetwork), err +} + +func (c *FakeClusterNetworks) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(clusternetworksResource, c.ns, name), &v1.ClusterNetwork{}) + + return err +} + +func (c *FakeClusterNetworks) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(clusternetworksResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.ClusterNetworkList{}) + return err +} + +func (c *FakeClusterNetworks) Get(name string) (result *v1.ClusterNetwork, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(clusternetworksResource, c.ns, name), &v1.ClusterNetwork{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ClusterNetwork), err +} + +func (c *FakeClusterNetworks) List(opts api.ListOptions) (result *v1.ClusterNetworkList, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewListAction(clusternetworksResource, c.ns, opts), &v1.ClusterNetworkList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.ClusterNetworkList{} + for _, item := range obj.(*v1.ClusterNetworkList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterNetworks. +func (c *FakeClusterNetworks) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(clusternetworksResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched clusterNetwork. +func (c *FakeClusterNetworks) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.ClusterNetwork, err error) { + obj, err := c.Fake. + Invokes(core.NewPatchSubresourceAction(clusternetworksResource, c.ns, name, data, subresources...), &v1.ClusterNetwork{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.ClusterNetwork), err +} diff --git a/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 000000000000..fd931d7177e9 --- /dev/null +++ b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,21 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) ClusterNetworks(namespace string) v1.ClusterNetworkInterface { + return &FakeClusterNetworks{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000000..7aa2a123fc60 --- /dev/null +++ b/pkg/sdn/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go @@ -0,0 +1,3 @@ +package v1 + +type ClusterNetworkExpansion interface{} diff --git a/pkg/template/client/clientset_generated/release_v1_4/clientset.go b/pkg/template/client/clientset_generated/release_v1_4/clientset.go new file mode 100644 index 000000000000..1ac2110cfd17 --- /dev/null +++ b/pkg/template/client/clientset_generated/release_v1_4/clientset.go @@ -0,0 +1,74 @@ +package release_v1_4 + +import ( + "github.com/golang/glog" + v1core "github.com/openshift/origin/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Core() v1core.CoreInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. 
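The fake package above is intended for tests along these lines: seed NewSimpleClientset with objects and exercise the same typed interface production code uses. This is a hedged sketch; the embedded ObjectMeta field and the install imports are assumptions about this tree rather than part of the patch.

package fake_test

import (
	"testing"

	_ "github.com/openshift/origin/pkg/sdn/api/install" // assumed: registers SDN types with the scheme and RESTMapper
	sdnv1 "github.com/openshift/origin/pkg/sdn/api/v1"
	"github.com/openshift/origin/pkg/sdn/client/clientset_generated/release_v1_4/fake"
	_ "k8s.io/kubernetes/pkg/api/install" // assumed: registers the legacy ("") group
	kapiv1 "k8s.io/kubernetes/pkg/api/v1"
)

func TestClusterNetworkLookup(t *testing.T) {
	// Seed the object tracker; the reactors installed by NewSimpleClientset
	// serve the object back for Get/List without touching a real server.
	seed := &sdnv1.ClusterNetwork{
		ObjectMeta: kapiv1.ObjectMeta{Name: "default"}, // assumes the usual embedded ObjectMeta
	}
	cs := fake.NewSimpleClientset(seed)

	got, err := cs.Core().ClusterNetworks("").Get("default")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != "default" {
		t.Fatalf("got %q, want %q", got.Name, "default")
	}
}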
+type Clientset struct { + *discovery.DiscoveryClient + *v1core.CoreClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.CoreClient, err = v1core.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &clientset, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. +func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/pkg/template/client/clientset_generated/release_v1_4/doc.go b/pkg/template/client/clientset_generated/release_v1_4/doc.go new file mode 100644 index 000000000000..e1e8996c26d8 --- /dev/null +++ b/pkg/template/client/clientset_generated/release_v1_4/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/template/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/template/api --output-base=../../.. + +// This package has the automatically generated clientset. +package release_v1_4 diff --git a/pkg/template/client/clientset_generated/release_v1_4/fake/clientset_generated.go b/pkg/template/client/clientset_generated/release_v1_4/fake/clientset_generated.go new file mode 100644 index 000000000000..c66cebc5b02a --- /dev/null +++ b/pkg/template/client/clientset_generated/release_v1_4/fake/clientset_generated.go @@ -0,0 +1,52 @@ +package fake + +import ( + clientset "github.com/openshift/origin/pkg/template/client/clientset_generated/release_v1_4" + v1core "github.com/openshift/origin/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1" + fakev1core "github.com/openshift/origin/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/fake" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/discovery" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. 
+// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := core.NewObjectTracker(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := core.Fake{} + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) + + fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + core.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ clientset.Interface = &Clientset{} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + return &fakev1core.FakeCore{Fake: &c.Fake} +} diff --git a/pkg/template/client/clientset_generated/release_v1_4/fake/doc.go b/pkg/template/client/clientset_generated/release_v1_4/fake/doc.go new file mode 100644 index 000000000000..4745adeeb86e --- /dev/null +++ b/pkg/template/client/clientset_generated/release_v1_4/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/template/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/template/api --output-base=../../.. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go new file mode 100644 index 000000000000..85d1b4b63af0 --- /dev/null +++ b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go @@ -0,0 +1,80 @@ +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" + serializer "k8s.io/kubernetes/pkg/runtime/serializer" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + TemplatesGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) Templates(namespace string) TemplateInterface { + return newTemplates(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. 
+func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. +func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/oapi" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/doc.go b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/doc.go new file mode 100644 index 000000000000..c130f5566ffc --- /dev/null +++ b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/template/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/template/api --output-base=../../.. + +// This package has the automatically generated typed clients. +package v1 diff --git a/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go new file mode 100644 index 000000000000..6e4ff8dddc56 --- /dev/null +++ b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/template/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/template/api --output-base=../../.. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 000000000000..0bc342987b77 --- /dev/null +++ b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,21 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) Templates(namespace string) v1.TemplateInterface { + return &FakeTemplates{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_template.go b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_template.go new file mode 100644 index 000000000000..d45af4a179ba --- /dev/null +++ b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_template.go @@ -0,0 +1,101 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/template/api/v1" + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeTemplates implements TemplateInterface +type FakeTemplates struct { + Fake *FakeCore + ns string +} + +var templatesResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "templates"} + +func (c *FakeTemplates) Create(template *v1.Template) (result *v1.Template, err error) { + obj, err := c.Fake. + Invokes(core.NewCreateAction(templatesResource, c.ns, template), &v1.Template{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Template), err +} + +func (c *FakeTemplates) Update(template *v1.Template) (result *v1.Template, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(templatesResource, c.ns, template), &v1.Template{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Template), err +} + +func (c *FakeTemplates) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(templatesResource, c.ns, name), &v1.Template{}) + + return err +} + +func (c *FakeTemplates) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(templatesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.TemplateList{}) + return err +} + +func (c *FakeTemplates) Get(name string) (result *v1.Template, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(templatesResource, c.ns, name), &v1.Template{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Template), err +} + +func (c *FakeTemplates) List(opts api.ListOptions) (result *v1.TemplateList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(templatesResource, c.ns, opts), &v1.TemplateList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.TemplateList{} + for _, item := range obj.(*v1.TemplateList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested templates. +func (c *FakeTemplates) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(templatesResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched template. +func (c *FakeTemplates) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Template, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewPatchSubresourceAction(templatesResource, c.ns, name, data, subresources...), &v1.Template{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.Template), err +} diff --git a/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000000..435a7ac7a89e --- /dev/null +++ b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go @@ -0,0 +1,3 @@ +package v1 + +type TemplateExpansion interface{} diff --git a/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/template.go b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/template.go new file mode 100644 index 000000000000..93a421bdc80b --- /dev/null +++ b/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1/template.go @@ -0,0 +1,135 @@ +package v1 + +import ( + v1 "github.com/openshift/origin/pkg/template/api/v1" + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// TemplatesGetter has a method to return a TemplateInterface. +// A group's client should implement this interface. +type TemplatesGetter interface { + Templates(namespace string) TemplateInterface +} + +// TemplateInterface has methods to work with Template resources. +type TemplateInterface interface { + Create(*v1.Template) (*v1.Template, error) + Update(*v1.Template) (*v1.Template, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.Template, error) + List(opts api.ListOptions) (*v1.TemplateList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Template, err error) + TemplateExpansion +} + +// templates implements TemplateInterface +type templates struct { + client *CoreClient + ns string +} + +// newTemplates returns a Templates +func newTemplates(c *CoreClient, namespace string) *templates { + return &templates{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a template and creates it. Returns the server's representation of the template, and an error, if there is any. +func (c *templates) Create(template *v1.Template) (result *v1.Template, err error) { + result = &v1.Template{} + err = c.client.Post(). + Namespace(c.ns). + Resource("templates"). + Body(template). + Do(). + Into(result) + return +} + +// Update takes the representation of a template and updates it. Returns the server's representation of the template, and an error, if there is any. +func (c *templates) Update(template *v1.Template) (result *v1.Template, err error) { + result = &v1.Template{} + err = c.client.Put(). + Namespace(c.ns). + Resource("templates"). + Name(template.Name). + Body(template). + Do(). + Into(result) + return +} + +// Delete takes name of the template and deletes it. Returns an error if one occurs. +func (c *templates) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("templates"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *templates) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("templates"). 
+ VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the template, and returns the corresponding template object, and an error if there is any. +func (c *templates) Get(name string) (result *v1.Template, err error) { + result = &v1.Template{} + err = c.client.Get(). + Namespace(c.ns). + Resource("templates"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Templates that match those selectors. +func (c *templates) List(opts api.ListOptions) (result *v1.TemplateList, err error) { + result = &v1.TemplateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("templates"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested templates. +func (c *templates) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("templates"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched template. +func (c *templates) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Template, err error) { + result = &v1.Template{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("templates"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/user/client/clientset_generated/release_v1_4/clientset.go b/pkg/user/client/clientset_generated/release_v1_4/clientset.go new file mode 100644 index 000000000000..4ee5abbb645d --- /dev/null +++ b/pkg/user/client/clientset_generated/release_v1_4/clientset.go @@ -0,0 +1,74 @@ +package release_v1_4 + +import ( + "github.com/golang/glog" + v1core "github.com/openshift/origin/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + discovery "k8s.io/kubernetes/pkg/client/typed/discovery" + "k8s.io/kubernetes/pkg/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + Core() v1core.CoreInterface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + *v1core.CoreClient +} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + if c == nil { + return nil + } + return c.CoreClient +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *restclient.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var clientset Clientset + var err error + clientset.CoreClient, err = v1core.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + glog.Errorf("failed to create the DiscoveryClient: %v", err) + return nil, err + } + return &clientset, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. 
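Watch above returns a plain watch.Interface, so consumers range over its event channel until it closes. A hedged sketch; the wiring of the TemplateInterface itself is left to the caller.

package example

import (
	"fmt"

	templatev1 "github.com/openshift/origin/pkg/template/api/v1"
	templateclient "github.com/openshift/origin/pkg/template/client/clientset_generated/release_v1_4/typed/core/v1"
	"k8s.io/kubernetes/pkg/api"
)

// watchTemplates prints template events until the watch channel closes.
func watchTemplates(templates templateclient.TemplateInterface) error {
	w, err := templates.Watch(api.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()

	for event := range w.ResultChan() {
		if t, ok := event.Object.(*templatev1.Template); ok {
			fmt.Printf("%s template %s/%s\n", event.Type, t.Namespace, t.Name)
		}
	}
	return nil
}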
+func NewForConfigOrDie(c *restclient.Config) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.NewForConfigOrDie(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &clientset +} + +// New creates a new Clientset for the given RESTClient. +func New(c *restclient.RESTClient) *Clientset { + var clientset Clientset + clientset.CoreClient = v1core.New(c) + + clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &clientset +} diff --git a/pkg/user/client/clientset_generated/release_v1_4/doc.go b/pkg/user/client/clientset_generated/release_v1_4/doc.go new file mode 100644 index 000000000000..1a9991bc47d6 --- /dev/null +++ b/pkg/user/client/clientset_generated/release_v1_4/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/user/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/user/api --output-base=../../.. + +// This package has the automatically generated clientset. +package release_v1_4 diff --git a/pkg/user/client/clientset_generated/release_v1_4/fake/clientset_generated.go b/pkg/user/client/clientset_generated/release_v1_4/fake/clientset_generated.go new file mode 100644 index 000000000000..b42bdbc0ed7a --- /dev/null +++ b/pkg/user/client/clientset_generated/release_v1_4/fake/clientset_generated.go @@ -0,0 +1,52 @@ +package fake + +import ( + clientset "github.com/openshift/origin/pkg/user/client/clientset_generated/release_v1_4" + v1core "github.com/openshift/origin/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1" + fakev1core "github.com/openshift/origin/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/fake" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apimachinery/registered" + "k8s.io/kubernetes/pkg/client/testing/core" + "k8s.io/kubernetes/pkg/client/typed/discovery" + fakediscovery "k8s.io/kubernetes/pkg/client/typed/discovery/fake" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/watch" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := core.NewObjectTracker(api.Scheme, api.Codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + fakePtr := core.Fake{} + fakePtr.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper())) + + fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil)) + + return &Clientset{fakePtr} +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
+type Clientset struct { + core.Fake +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return &fakediscovery.FakeDiscovery{Fake: &c.Fake} +} + +var _ clientset.Interface = &Clientset{} + +// Core retrieves the CoreClient +func (c *Clientset) Core() v1core.CoreInterface { + return &fakev1core.FakeCore{Fake: &c.Fake} +} diff --git a/pkg/user/client/clientset_generated/release_v1_4/fake/doc.go b/pkg/user/client/clientset_generated/release_v1_4/fake/doc.go new file mode 100644 index 000000000000..aa1b1ecf548b --- /dev/null +++ b/pkg/user/client/clientset_generated/release_v1_4/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/user/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/user/api --output-base=../../.. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go new file mode 100644 index 000000000000..77d96f32f6a9 --- /dev/null +++ b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/core_client.go @@ -0,0 +1,80 @@ +package v1 + +import ( + api "k8s.io/kubernetes/pkg/api" + registered "k8s.io/kubernetes/pkg/apimachinery/registered" + restclient "k8s.io/kubernetes/pkg/client/restclient" + serializer "k8s.io/kubernetes/pkg/runtime/serializer" +) + +type CoreInterface interface { + GetRESTClient() *restclient.RESTClient + UsersGetter +} + +// CoreClient is used to interact with features provided by the Core group. +type CoreClient struct { + *restclient.RESTClient +} + +func (c *CoreClient) Users(namespace string) UserInterface { + return newUsers(c, namespace) +} + +// NewForConfig creates a new CoreClient for the given config. +func NewForConfig(c *restclient.Config) (*CoreClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreClient{client}, nil +} + +// NewForConfigOrDie creates a new CoreClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *restclient.Config) *CoreClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreClient for the given RESTClient. +func New(c *restclient.RESTClient) *CoreClient { + return &CoreClient{c} +} + +func setConfigDefaults(config *restclient.Config) error { + // if core group is not registered, return an error + g, err := registered.Group("") + if err != nil { + return err + } + config.APIPath = "/oapi" + if config.UserAgent == "" { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *CoreClient) GetRESTClient() *restclient.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/doc.go b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/doc.go new file mode 100644 index 000000000000..bf7effc3c93b --- /dev/null +++ b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/user/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/user/api --output-base=../../.. + +// This package has the automatically generated typed clients. +package v1 diff --git a/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go new file mode 100644 index 000000000000..359264c7b38f --- /dev/null +++ b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/fake/doc.go @@ -0,0 +1,4 @@ +// This package is generated by client-gen with arguments: --clientset-api-path=/oapi --clientset-name=release_v1_4 --clientset-path=github.com/openshift/origin/pkg/user/client/clientset_generated --go-header-file=hack/boilerplate.txt --input=[api/v1] --input-base=github.com/openshift/origin/pkg/user/api --output-base=../../.. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go new file mode 100644 index 000000000000..6c0772deabfb --- /dev/null +++ b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_core_client.go @@ -0,0 +1,21 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1" + restclient "k8s.io/kubernetes/pkg/client/restclient" + core "k8s.io/kubernetes/pkg/client/testing/core" +) + +type FakeCore struct { + *core.Fake +} + +func (c *FakeCore) Users(namespace string) v1.UserInterface { + return &FakeUsers{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCore) GetRESTClient() *restclient.RESTClient { + return nil +} diff --git a/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_user.go b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_user.go new file mode 100644 index 000000000000..c98fddb1eb82 --- /dev/null +++ b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/fake/fake_user.go @@ -0,0 +1,101 @@ +package fake + +import ( + v1 "github.com/openshift/origin/pkg/user/api/v1" + api "k8s.io/kubernetes/pkg/api" + unversioned "k8s.io/kubernetes/pkg/api/unversioned" + core "k8s.io/kubernetes/pkg/client/testing/core" + labels "k8s.io/kubernetes/pkg/labels" + watch "k8s.io/kubernetes/pkg/watch" +) + +// FakeUsers implements UserInterface +type FakeUsers struct { + Fake *FakeCore + ns string +} + +var usersResource = unversioned.GroupVersionResource{Group: "", Version: "v1", Resource: "users"} + +func (c *FakeUsers) Create(user *v1.User) (result *v1.User, err error) { + obj, err := c.Fake. 
+ Invokes(core.NewCreateAction(usersResource, c.ns, user), &v1.User{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.User), err +} + +func (c *FakeUsers) Update(user *v1.User) (result *v1.User, err error) { + obj, err := c.Fake. + Invokes(core.NewUpdateAction(usersResource, c.ns, user), &v1.User{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.User), err +} + +func (c *FakeUsers) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(core.NewDeleteAction(usersResource, c.ns, name), &v1.User{}) + + return err +} + +func (c *FakeUsers) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := core.NewDeleteCollectionAction(usersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1.UserList{}) + return err +} + +func (c *FakeUsers) Get(name string) (result *v1.User, err error) { + obj, err := c.Fake. + Invokes(core.NewGetAction(usersResource, c.ns, name), &v1.User{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.User), err +} + +func (c *FakeUsers) List(opts api.ListOptions) (result *v1.UserList, err error) { + obj, err := c.Fake. + Invokes(core.NewListAction(usersResource, c.ns, opts), &v1.UserList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1.UserList{} + for _, item := range obj.(*v1.UserList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested users. +func (c *FakeUsers) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(core.NewWatchAction(usersResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched user. +func (c *FakeUsers) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.User, err error) { + obj, err := c.Fake. + Invokes(core.NewPatchSubresourceAction(usersResource, c.ns, name, data, subresources...), &v1.User{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.User), err +} diff --git a/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go new file mode 100644 index 000000000000..df295f6c5022 --- /dev/null +++ b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/generated_expansion.go @@ -0,0 +1,3 @@ +package v1 + +type UserExpansion interface{} diff --git a/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/user.go b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/user.go new file mode 100644 index 000000000000..758aa38743f5 --- /dev/null +++ b/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1/user.go @@ -0,0 +1,135 @@ +package v1 + +import ( + v1 "github.com/openshift/origin/pkg/user/api/v1" + api "k8s.io/kubernetes/pkg/api" + watch "k8s.io/kubernetes/pkg/watch" +) + +// UsersGetter has a method to return a UserInterface. +// A group's client should implement this interface. +type UsersGetter interface { + Users(namespace string) UserInterface +} + +// UserInterface has methods to work with User resources. 
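Patch, implemented by both the fake client above and the real User client below, takes an api.PatchType plus the raw patch bytes. A hedged sketch using a strategic-merge body; the annotation key is illustrative, and users are addressed here without a namespace even though the generated getter accepts one.

package example

import (
	userv1 "github.com/openshift/origin/pkg/user/api/v1"
	userclient "github.com/openshift/origin/pkg/user/client/clientset_generated/release_v1_4/typed/core/v1"
	"k8s.io/kubernetes/pkg/api"
)

// annotateUser adds one annotation via the Patch verb instead of a full
// read-modify-write Update.
func annotateUser(users userclient.UserInterface, name string) (*userv1.User, error) {
	patch := []byte(`{"metadata":{"annotations":{"example.openshift.io/touched":"true"}}}`)
	return users.Patch(name, api.StrategicMergePatchType, patch)
}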
+type UserInterface interface { + Create(*v1.User) (*v1.User, error) + Update(*v1.User) (*v1.User, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1.User, error) + List(opts api.ListOptions) (*v1.UserList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.User, err error) + UserExpansion +} + +// users implements UserInterface +type users struct { + client *CoreClient + ns string +} + +// newUsers returns a Users +func newUsers(c *CoreClient, namespace string) *users { + return &users{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a user and creates it. Returns the server's representation of the user, and an error, if there is any. +func (c *users) Create(user *v1.User) (result *v1.User, err error) { + result = &v1.User{} + err = c.client.Post(). + Namespace(c.ns). + Resource("users"). + Body(user). + Do(). + Into(result) + return +} + +// Update takes the representation of a user and updates it. Returns the server's representation of the user, and an error, if there is any. +func (c *users) Update(user *v1.User) (result *v1.User, err error) { + result = &v1.User{} + err = c.client.Put(). + Namespace(c.ns). + Resource("users"). + Name(user.Name). + Body(user). + Do(). + Into(result) + return +} + +// Delete takes name of the user and deletes it. Returns an error if one occurs. +func (c *users) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("users"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *users) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("users"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the user, and returns the corresponding user object, and an error if there is any. +func (c *users) Get(name string) (result *v1.User, err error) { + result = &v1.User{} + err = c.client.Get(). + Namespace(c.ns). + Resource("users"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Users that match those selectors. +func (c *users) List(opts api.ListOptions) (result *v1.UserList, err error) { + result = &v1.UserList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("users"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested users. +func (c *users) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("users"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched user. +func (c *users) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.User, err error) { + result = &v1.User{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("users"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/tools/rebasehelpers/util/git.go b/tools/rebasehelpers/util/git.go index 94c174ec0564..5d7933d591ab 100644 --- a/tools/rebasehelpers/util/git.go +++ b/tools/rebasehelpers/util/git.go @@ -20,6 +20,7 @@ var UpstreamSummaryPattern = regexp.MustCompile(`UPSTREAM: (revert: [a-f0-9]{7,} // here. var SupportedHosts = map[string]int{ "bitbucket.org": 3, + "cloud.google.com": 2, "code.google.com": 3, "github.com": 3, "golang.org": 3, diff --git a/vendor/google.golang.org/cloud/LICENSE b/vendor/cloud.google.com/go/LICENSE similarity index 100% rename from vendor/google.golang.org/cloud/LICENSE rename to vendor/cloud.google.com/go/LICENSE diff --git a/vendor/google.golang.org/cloud/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go similarity index 80% rename from vendor/google.golang.org/cloud/compute/metadata/metadata.go rename to vendor/cloud.google.com/go/compute/metadata/metadata.go index 0a709598df02..f9d2bef6c2f7 100644 --- a/vendor/google.golang.org/cloud/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -17,7 +17,7 @@ // // This package is a wrapper around the GCE metadata service, // as documented at https://developers.google.com/compute/docs/metadata. -package metadata +package metadata // import "cloud.google.com/go/compute/metadata" import ( "encoding/json" @@ -27,6 +27,7 @@ import ( "net/http" "net/url" "os" + "runtime" "strings" "sync" "time" @@ -34,11 +35,20 @@ import ( "golang.org/x/net/context" "golang.org/x/net/context/ctxhttp" - "google.golang.org/cloud/internal" + "cloud.google.com/go/internal" ) -// metadataIP is the documented metadata server IP address. -const metadataIP = "169.254.169.254" +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. + metadataHostEnv = "GCE_METADATA_HOST" +) type cachedValue struct { k string @@ -110,7 +120,7 @@ func getETag(client *http.Client, suffix string) (value, etag string, err error) // deployments. To enable spoofing of the metadata service, the environment // variable GCE_METADATA_HOST is first inspected to decide where metadata // requests shall go. - host := os.Getenv("GCE_METADATA_HOST") + host := os.Getenv(metadataHostEnv) if host == "" { // Using 169.254.169.254 instead of "metadata" here because Go // binaries built with the "netgo" tag and without cgo won't @@ -163,32 +173,34 @@ func (c *cachedValue) get() (v string, err error) { return } -var onGCE struct { - sync.Mutex - set bool - v bool -} +var ( + onGCEOnce sync.Once + onGCE bool +) // OnGCE reports whether this process is running on Google Compute Engine. func OnGCE() bool { - defer onGCE.Unlock() - onGCE.Lock() - if onGCE.set { - return onGCE.v - } - onGCE.set = true - onGCE.v = testOnGCE() - return onGCE.v + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() } func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + ctx, cancel := context.WithCancel(context.Background()) defer cancel() resc := make(chan bool, 2) // Try two strategies in parallel. 
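For orientation, the relocated metadata package above now recognizes the GCE_METADATA_HOST override and answers OnGCE through a sync.Once. A minimal, illustrative sketch of a caller (not part of the vendored change; ProjectID is assumed from the wider package API, since only OnGCE and the environment variable appear in this excerpt):

package main

import (
	"fmt"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// For tests, pointing GCE_METADATA_HOST at a fake server both redirects
	// requests and makes OnGCE report true, per the spoofing path added above.
	// os.Setenv("GCE_METADATA_HOST", "127.0.0.1:8080")
	if !metadata.OnGCE() {
		fmt.Println("not on GCE; skipping metadata lookups")
		return
	}
	// ProjectID is assumed to exist in the package; it is not shown in this diff.
	project, err := metadata.ProjectID()
	if err != nil {
		fmt.Println("metadata lookup failed:", err)
		return
	}
	fmt.Println("GCE project:", project)
}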
- // See https://github.com/GoogleCloudPlatform/gcloud-golang/issues/194 + // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 go func() { res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP) if err != nil { @@ -208,9 +220,53 @@ func testOnGCE() bool { resc <- strsContains(addrs, metadataIP) }() + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). return <-resc } +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. + return false + } + slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + // Subscribe subscribes to a value from the metadata service. // The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". // The suffix may contain query parameters. diff --git a/vendor/cloud.google.com/go/internal/cloud.go b/vendor/cloud.google.com/go/internal/cloud.go new file mode 100644 index 000000000000..8e0c8f8e525c --- /dev/null +++ b/vendor/cloud.google.com/go/internal/cloud.go @@ -0,0 +1,64 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides support for the cloud packages. +// +// Users should not import this package directly. +package internal + +import ( + "fmt" + "net/http" +) + +const userAgent = "gcloud-golang/0.1" + +// Transport is an http.RoundTripper that appends Google Cloud client's +// user-agent to the original request's user-agent header. +type Transport struct { + // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. + // Do User-Agent some other way. + + // Base is the actual http.RoundTripper + // requests will use. 
It must not be nil. + Base http.RoundTripper +} + +// RoundTrip appends a user-agent to the existing user-agent +// header and delegates the request to the base http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + ua := req.Header.Get("User-Agent") + if ua == "" { + ua = userAgent + } else { + ua = fmt.Sprintf("%s %s", ua, userAgent) + } + req.Header.Set("User-Agent", ua) + return t.Base.RoundTrip(req) +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} diff --git a/vendor/google.golang.org/cloud/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go similarity index 93% rename from vendor/google.golang.org/cloud/storage/acl.go rename to vendor/cloud.google.com/go/storage/acl.go index 1c7be3211050..e4d968b0ac6d 100644 --- a/vendor/google.golang.org/cloud/storage/acl.go +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -96,17 +96,7 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { if err != nil { return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err) } - r := make([]ACLRule, 0, len(acls.Items)) - for _, v := range acls.Items { - if m, ok := v.(map[string]interface{}); ok { - entity, ok1 := m["entity"].(string) - role, ok2 := m["role"].(string) - if ok1 && ok2 { - r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)}) - } - } - } - return r, nil + return toACLRules(acls.Items), nil } func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role ACLRole) error { @@ -169,17 +159,7 @@ func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { if err != nil { return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err) } - r := make([]ACLRule, 0, len(acls.Items)) - for _, v := range acls.Items { - if m, ok := v.(map[string]interface{}); ok { - entity, ok1 := m["entity"].(string) - role, ok2 := m["role"].(string) - if ok1 && ok2 { - r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)}) - } - } - } - return r, nil + return toACLRules(acls.Items), nil } func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole) error { @@ -202,3 +182,17 @@ func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { } return nil } + +func toACLRules(items []interface{}) []ACLRule { + r := make([]ACLRule, 0, len(items)) + for _, v := range items { + if m, ok := v.(map[string]interface{}); ok { + entity, ok1 := m["entity"].(string) + role, ok2 := m["role"].(string) + if ok1 && ok2 { + r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)}) + } + } + } + return r +} diff --git a/vendor/google.golang.org/cloud/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go similarity index 91% rename from vendor/google.golang.org/cloud/storage/reader.go rename to vendor/cloud.google.com/go/storage/reader.go index fed570708f4d..9e21648e5c70 100644 --- a/vendor/google.golang.org/cloud/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -31,7 +31,9 @@ func (r *Reader) Close() error { func (r *Reader) Read(p []byte) (int, error) { n, err := 
r.body.Read(p) - r.remain -= int64(n) + if r.remain != -1 { + r.remain -= int64(n) + } return n, err } @@ -42,7 +44,7 @@ func (r *Reader) Size() int64 { return r.size } -// Remain returns the number of bytes left to read. +// Remain returns the number of bytes left to read, or -1 if unknown. func (r *Reader) Remain() int64 { return r.remain } diff --git a/vendor/google.golang.org/cloud/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go similarity index 78% rename from vendor/google.golang.org/cloud/storage/storage.go rename to vendor/cloud.google.com/go/storage/storage.go index bc70f50b4e9a..095ab263ed17 100644 --- a/vendor/google.golang.org/cloud/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -15,9 +15,10 @@ // Package storage contains a Google Cloud Storage client. // // This package is experimental and may make backwards-incompatible changes. -package storage +package storage // import "cloud.google.com/go/storage" import ( + "bytes" "crypto" "crypto/rand" "crypto/rsa" @@ -37,8 +38,9 @@ import ( "time" "unicode/utf8" - "google.golang.org/cloud" - "google.golang.org/cloud/internal/transport" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" "golang.org/x/net/context" "google.golang.org/api/googleapi" @@ -48,6 +50,9 @@ import ( var ( ErrBucketNotExist = errors.New("storage: bucket doesn't exist") ErrObjectNotExist = errors.New("storage: object doesn't exist") + + // Done is returned by iterators in this package when they have no more items. + Done = iterator.Done ) const userAgent = "gcloud-golang-storage/20151204" @@ -68,50 +73,45 @@ const ( // AdminClient is a client type for performing admin operations on a project's // buckets. +// +// Deprecated: Client has all of AdminClient's methods. type AdminClient struct { - hc *http.Client - raw *raw.Service + c *Client projectID string } // NewAdminClient creates a new AdminClient for a given project. -func NewAdminClient(ctx context.Context, projectID string, opts ...cloud.ClientOption) (*AdminClient, error) { +// +// Deprecated: use NewClient instead. +func NewAdminClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*AdminClient, error) { c, err := NewClient(ctx, opts...) if err != nil { return nil, err } return &AdminClient{ - hc: c.hc, - raw: c.raw, + c: c, projectID: projectID, }, nil } // Close closes the AdminClient. func (c *AdminClient) Close() error { - c.hc = nil - return nil + return c.c.Close() } // Create creates a Bucket in the project. // If attrs is nil the API defaults will be used. +// +// Deprecated: use BucketHandle.Create instead. func (c *AdminClient) CreateBucket(ctx context.Context, bucketName string, attrs *BucketAttrs) error { - var bkt *raw.Bucket - if attrs != nil { - bkt = attrs.toRawBucket() - } else { - bkt = &raw.Bucket{} - } - bkt.Name = bucketName - req := c.raw.Buckets.Insert(c.projectID, bkt) - _, err := req.Context(ctx).Do() - return err + return c.c.Bucket(bucketName).Create(ctx, c.projectID, attrs) } // Delete deletes a Bucket in the project. +// +// Deprecated: use BucketHandle.Delete instead. func (c *AdminClient) DeleteBucket(ctx context.Context, bucketName string) error { - req := c.raw.Buckets.Delete(bucketName) - return req.Context(ctx).Do() + return c.c.Bucket(bucketName).Delete(ctx) } // Client is a client for interacting with Google Cloud Storage. @@ -121,11 +121,11 @@ type Client struct { } // NewClient creates a new Google Cloud Storage client. 
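The storage.go hunks around here replace google.golang.org/cloud options with google.golang.org/api/option. A minimal sketch of constructing a client against the new import path (bucket and object names are placeholders; Object and Attrs live in parts of the package not shown in this excerpt):

package main

import (
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// ScopeReadOnly narrows the default ScopeFullControl mentioned in the
	// updated NewClient comment below.
	client, err := storage.NewClient(ctx, option.WithScopes(storage.ScopeReadOnly))
	if err != nil {
		log.Fatalf("storage.NewClient: %v", err)
	}
	defer client.Close()

	attrs, err := client.Bucket("example-bucket").Object("example-object").Attrs(ctx)
	if err == storage.ErrObjectNotExist {
		log.Println("object does not exist")
		return
	}
	if err != nil {
		log.Fatalf("Attrs: %v", err)
	}
	log.Printf("object size: %d bytes", attrs.Size)
}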
-// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use cloud.WithScopes. -func NewClient(ctx context.Context, opts ...cloud.ClientOption) (*Client, error) { - o := []cloud.ClientOption{ - cloud.WithScopes(ScopeFullControl), - cloud.WithUserAgent(userAgent), +// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(ScopeFullControl), + option.WithUserAgent(userAgent), } opts = append(o, opts...) hc, _, err := transport.NewHTTPClient(ctx, opts...) @@ -180,97 +180,6 @@ func (c *Client) Bucket(name string) *BucketHandle { } } -// ACL returns an ACLHandle, which provides access to the bucket's access control list. -// This controls who can list, create or overwrite the objects in a bucket. -// This call does not perform any network operations. -func (c *BucketHandle) ACL() *ACLHandle { - return c.acl -} - -// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. -// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. -// This call does not perform any network operations. -func (c *BucketHandle) DefaultObjectACL() *ACLHandle { - return c.defaultObjectACL -} - -// Object returns an ObjectHandle, which provides operations on the named object. -// This call does not perform any network operations. -// -// name must consist entirely of valid UTF-8-encoded runes. The full specification -// for valid object names can be found at: -// https://cloud.google.com/storage/docs/bucket-naming -func (b *BucketHandle) Object(name string) *ObjectHandle { - return &ObjectHandle{ - c: b.c, - bucket: b.name, - object: name, - acl: &ACLHandle{ - c: b.c, - bucket: b.name, - object: name, - }, - } -} - -// TODO(jbd): Add storage.buckets.list. -// TODO(jbd): Add storage.buckets.update. - -// TODO(jbd): Add storage.objects.watch. - -// Attrs returns the metadata for the bucket. -func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) { - resp, err := b.c.raw.Buckets.Get(b.name).Projection("full").Context(ctx).Do() - if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { - return nil, ErrBucketNotExist - } - if err != nil { - return nil, err - } - return newBucket(resp), nil -} - -// List lists objects from the bucket. You can specify a query -// to filter the results. If q is nil, no filtering is applied. 
-func (b *BucketHandle) List(ctx context.Context, q *Query) (*ObjectList, error) { - req := b.c.raw.Objects.List(b.name) - req.Projection("full") - if q != nil { - req.Delimiter(q.Delimiter) - req.Prefix(q.Prefix) - req.Versions(q.Versions) - req.PageToken(q.Cursor) - if q.MaxResults > 0 { - req.MaxResults(int64(q.MaxResults)) - } - } - resp, err := req.Context(ctx).Do() - if err != nil { - return nil, err - } - objects := &ObjectList{ - Results: make([]*ObjectAttrs, len(resp.Items)), - Prefixes: make([]string, len(resp.Prefixes)), - } - for i, item := range resp.Items { - objects.Results[i] = newObject(item) - } - for i, prefix := range resp.Prefixes { - objects.Prefixes[i] = prefix - } - if resp.NextPageToken != "" { - next := Query{} - if q != nil { - // keep the other filtering - // criteria if there is a query - next = *q - } - next.Cursor = resp.NextPageToken - objects.Next = &next - } - return objects, nil -} - // SignedURLOptions allows you to restrict the access to the signed URL. type SignedURLOptions struct { // GoogleAccessID represents the authorizer of the signed URL generation. @@ -290,9 +199,25 @@ type SignedURLOptions struct { // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes // // Provide the contents of the PEM file as a byte slice. - // Required. + // Exactly one of PrivateKey or SignBytes must be non-nil. PrivateKey []byte + // SignBytes is a function for implementing custom signing. + // If your application is running on Google App Engine, you can use appengine's internal signing function: + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // url, err := SignedURL("bucket", "object", &SignedURLOptions{ + // GoogleAccessID: acc, + // SignBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) + // + // Exactly one of PrivateKey or SignBytes must be non-nil. + SignBytes func([]byte) ([]byte, error) + // Method is the HTTP method to be used with the signed URL. // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. // Required. 
@@ -328,8 +253,11 @@ func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { if opts == nil { return "", errors.New("storage: missing required SignedURLOptions") } - if opts.GoogleAccessID == "" || opts.PrivateKey == nil { - return "", errors.New("storage: missing required credentials to generate a signed URL") + if opts.GoogleAccessID == "" { + return "", errors.New("storage: missing required GoogleAccessID") + } + if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { + return "", errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") } if opts.Method == "" { return "", errors.New("storage: missing required method option") @@ -337,26 +265,39 @@ func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { if opts.Expires.IsZero() { return "", errors.New("storage: missing required expires option") } - key, err := parseKey(opts.PrivateKey) - if err != nil { - return "", err + + signBytes := opts.SignBytes + if opts.PrivateKey != nil { + key, err := parseKey(opts.PrivateKey) + if err != nil { + return "", err + } + signBytes = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15( + rand.Reader, + key, + crypto.SHA256, + sum[:], + ) + } + } else { + signBytes = opts.SignBytes } + u := &url.URL{ Path: fmt.Sprintf("/%s/%s", bucket, name), } - h := sha256.New() - fmt.Fprintf(h, "%s\n", opts.Method) - fmt.Fprintf(h, "%s\n", opts.MD5) - fmt.Fprintf(h, "%s\n", opts.ContentType) - fmt.Fprintf(h, "%d\n", opts.Expires.Unix()) - fmt.Fprintf(h, "%s", strings.Join(opts.Headers, "\n")) - fmt.Fprintf(h, "%s", u.String()) - b, err := rsa.SignPKCS1v15( - rand.Reader, - key, - crypto.SHA256, - h.Sum(nil), - ) + + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%s\n", opts.Method) + fmt.Fprintf(buf, "%s\n", opts.MD5) + fmt.Fprintf(buf, "%s\n", opts.ContentType) + fmt.Fprintf(buf, "%d\n", opts.Expires.Unix()) + fmt.Fprintf(buf, "%s", strings.Join(opts.Headers, "\n")) + fmt.Fprintf(buf, "%s", u.String()) + + b, err := signBytes(buf.Bytes()) if err != nil { return "", err } @@ -446,7 +387,16 @@ func (o *ObjectHandle) Delete(ctx context.Context) error { if err := applyConds("Delete", o.conds, call); err != nil { return err } - return call.Do() + err := call.Do() + switch e := err.(type) { + case nil: + return nil + case *googleapi.Error: + if e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + } + return err } // CopyTo copies the object to the given dst. @@ -487,6 +437,52 @@ func (o *ObjectHandle) CopyTo(ctx context.Context, dst *ObjectHandle, attrs *Obj return newObject(obj), nil } +// ComposeFrom concatenates the provided slice of source objects into a new +// object whose destination is the receiver. The provided attrs, if not nil, +// are used to set the attributes on the newly-created object. All source +// objects must reside within the same bucket as the destination. 
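The SignedURL changes above move from a mandatory PrivateKey to an exactly-one-of PrivateKey or SignBytes rule. A short sketch of the PrivateKey path (key path, bucket, object, and account are placeholders; to sign elsewhere, set SignBytes instead and leave PrivateKey nil):

package main

import (
	"io/ioutil"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	// PEM-encoded service account key; the file path is a placeholder.
	pemBytes, err := ioutil.ReadFile("key.pem")
	if err != nil {
		log.Fatal(err)
	}
	url, err := storage.SignedURL("example-bucket", "example-object", &storage.SignedURLOptions{
		GoogleAccessID: "service-account@example-project.iam.gserviceaccount.com",
		PrivateKey:     pemBytes, // exactly one of PrivateKey or SignBytes may be set
		Method:         "GET",
		Expires:        time.Now().Add(15 * time.Minute),
	})
	if err != nil {
		log.Fatalf("SignedURL: %v", err)
	}
	log.Println("GET", url)
}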
+func (o *ObjectHandle) ComposeFrom(ctx context.Context, srcs []*ObjectHandle, attrs *ObjectAttrs) (*ObjectAttrs, error) { + if o.bucket == "" || o.object == "" { + return nil, errors.New("storage: the destination bucket and object names must be non-empty") + } + if len(srcs) == 0 { + return nil, errors.New("storage: at least one source object must be specified") + } + + req := &raw.ComposeRequest{} + if attrs != nil { + req.Destination = attrs.toRawObject(o.bucket) + req.Destination.Name = o.object + } + + for _, src := range srcs { + if src.bucket != o.bucket { + return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", o.bucket, src.bucket) + } + if src.object == "" { + return nil, errors.New("storage: all source object names must be non-empty") + } + srcObj := &raw.ComposeRequestSourceObjects{ + Name: src.object, + } + if err := applyConds("ComposeFrom source", src.conds, composeSourceObj{srcObj}); err != nil { + return nil, err + } + req.SourceObjects = append(req.SourceObjects, srcObj) + } + + call := o.c.raw.Objects.Compose(o.bucket, o.object, req).Context(ctx) + if err := applyConds("ComposeFrom destination", o.conds, call); err != nil { + return nil, err + } + + obj, err := call.Do() + if err != nil { + return nil, err + } + return newObject(obj), nil +} + // NewReader creates a new Reader to read the contents of the // object. // ErrObjectNotExist will be returned if the object is not found. @@ -520,7 +516,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) if err := applyConds("NewReader", o.conds, objectsGetCall{req}); err != nil { return nil, err } - if length < 0 { + if length < 0 && offset > 0 { req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) } else if length > 0 { req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) @@ -534,8 +530,13 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) return nil, ErrObjectNotExist } if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) res.Body.Close() - return nil, fmt.Errorf("storage: can't read object %v/%v, status code: %v", o.bucket, o.object, res.Status) + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } } if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { res.Body.Close() @@ -547,9 +548,6 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) return nil, fmt.Errorf("storage: can't parse content length %q: %v", clHeader, err) } remain := res.ContentLength - if remain < 0 { - return nil, errors.New("storage: unknown content length") - } body := res.Body if length == 0 { remain = 0 @@ -612,64 +610,6 @@ func parseKey(key []byte) (*rsa.PrivateKey, error) { return parsed, nil } -// BucketAttrs represents the metadata for a Google Cloud Storage bucket. -type BucketAttrs struct { - // Name is the name of the bucket. - Name string - - // ACL is the list of access control rules on the bucket. - ACL []ACLRule - - // DefaultObjectACL is the list of access controls to - // apply to new objects when no object ACL is provided. - DefaultObjectACL []ACLRule - - // Location is the location of the bucket. It defaults to "US". - Location string - - // MetaGeneration is the metadata generation of the bucket. - MetaGeneration int64 - - // StorageClass is the storage class of the bucket. 
This defines - // how objects in the bucket are stored and determines the SLA - // and the cost of storage. Typical values are "STANDARD" and - // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD". - StorageClass string - - // Created is the creation time of the bucket. - Created time.Time -} - -func newBucket(b *raw.Bucket) *BucketAttrs { - if b == nil { - return nil - } - bucket := &BucketAttrs{ - Name: b.Name, - Location: b.Location, - MetaGeneration: b.Metageneration, - StorageClass: b.StorageClass, - Created: convertTime(b.TimeCreated), - } - acl := make([]ACLRule, len(b.Acl)) - for i, rule := range b.Acl { - acl[i] = ACLRule{ - Entity: ACLEntity(rule.Entity), - Role: ACLRole(rule.Role), - } - } - bucket.ACL = acl - objACL := make([]ACLRule, len(b.DefaultObjectAcl)) - for i, rule := range b.DefaultObjectAcl { - objACL[i] = ACLRule{ - Entity: ACLEntity(rule.Entity), - Role: ACLRole(rule.Role), - } - } - bucket.DefaultObjectACL = objACL - return bucket -} - func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl { var acl []*raw.ObjectAccessControl if len(oldACL) > 0 { @@ -684,28 +624,6 @@ func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl { return acl } -// toRawBucket copies the editable attribute from b to the raw library's Bucket type. -func (b *BucketAttrs) toRawBucket() *raw.Bucket { - var acl []*raw.BucketAccessControl - if len(b.ACL) > 0 { - acl = make([]*raw.BucketAccessControl, len(b.ACL)) - for i, rule := range b.ACL { - acl[i] = &raw.BucketAccessControl{ - Entity: string(rule.Entity), - Role: string(rule.Role), - } - } - } - dACL := toRawObjectACL(b.DefaultObjectACL) - return &raw.Bucket{ - Name: b.Name, - DefaultObjectAcl: dACL, - Location: b.Location, - StorageClass: b.StorageClass, - Acl: acl, - } -} - // toRawObject copies the editable attributes from o to the raw library's Object type. func (o ObjectAttrs) toRawObject(bucket string) *raw.Object { acl := toRawObjectACL(o.ACL) @@ -803,6 +721,12 @@ type ObjectAttrs struct { // For buckets with versioning enabled, changing an object's // metadata does not change this property. This field is read-only. Updated time.Time + + // Prefix is set only for ObjectAttrs which represent synthetic "directory + // entries" when iterating over buckets using Query.Delimiter. See + // ObjectIterator.Next. When set, no other fields in ObjectAttrs will be + // populated. + Prefix string } // convertTime converts a time in RFC3339 format to time.Time. @@ -882,31 +806,19 @@ type Query struct { // Cursor is a previously-returned page token // representing part of the larger set of results to view. // Optional. + // + // Deprecated: Use ObjectIterator.PageInfo().Token instead. Cursor string // MaxResults is the maximum number of items plus prefixes // to return. As duplicate prefixes are omitted, // fewer total results may be returned than requested. // The default page limit is used if it is negative or zero. + // + // Deprecated: Use ObjectIterator.PageInfo().MaxSize instead. MaxResults int } -// ObjectList represents a list of objects returned from a bucket List call. -type ObjectList struct { - // Results represent a list of object results. - Results []*ObjectAttrs - - // Next is the continuation query to retrieve more - // results with the same filtering criteria. If there - // are no more results to retrieve, it is nil. - Next *Query - - // Prefixes represents prefixes of objects - // matching-but-not-listed up to and including - // the requested delimiter. 
- Prefixes []string -} - // contentTyper implements ContentTyper to enable an // io.ReadCloser to specify its MIME type. type contentTyper struct { @@ -1018,3 +930,23 @@ func (c objectsGetCall) IfMetagenerationMatch(gen int64) { func (c objectsGetCall) IfMetagenerationNotMatch(gen int64) { appendParam(c.req, "ifMetagenerationNotMatch", fmt.Sprint(gen)) } + +// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods +// that modifyCall searches for by name. +type composeSourceObj struct { + src *raw.ComposeRequestSourceObjects +} + +func (c composeSourceObj) Generation(gen int64) { + c.src.Generation = gen +} + +func (c composeSourceObj) IfGenerationMatch(gen int64) { + // It's safe to overwrite ObjectPreconditions, since its only field is + // IfGenerationMatch. + c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{ + IfGenerationMatch: gen, + } +} + +// TODO(jbd): Add storage.objects.watch. diff --git a/vendor/google.golang.org/cloud/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go similarity index 100% rename from vendor/google.golang.org/cloud/storage/writer.go rename to vendor/cloud.google.com/go/storage/writer.go diff --git a/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go b/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go index 05d0c6e9d6b0..69116a35c547 100644 --- a/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go +++ b/vendor/github.com/google/cadvisor/container/libcontainer/helpers.go @@ -45,7 +45,7 @@ type CgroupSubsystems struct { // Get information about the cgroup subsystems. func GetCgroupSubsystems() (CgroupSubsystems, error) { // Get all cgroup mounts. - allCgroups, err := cgroups.GetCgroupMounts() + allCgroups, err := cgroups.GetCgroupMounts(true) if err != nil { return CgroupSubsystems{}, err } diff --git a/vendor/github.com/google/cadvisor/metrics/prometheus.go b/vendor/github.com/google/cadvisor/metrics/prometheus.go index 7d7385993270..775866bb368c 100644 --- a/vendor/github.com/google/cadvisor/metrics/prometheus.go +++ b/vendor/github.com/google/cadvisor/metrics/prometheus.go @@ -228,6 +228,26 @@ func NewPrometheusCollector(i infoProvider, f ContainerLabelsFunc) *PrometheusCo }, } }, + }, { + name: "container_fs_inodes_free", + help: "Number of available Inodes", + valueType: prometheus.GaugeValue, + extraLabels: []string{"device"}, + getValues: func(s *info.ContainerStats) metricValues { + return fsValues(s.Filesystem, func(fs *info.FsStats) float64 { + return float64(fs.InodesFree) + }) + }, + }, { + name: "container_fs_inodes_total", + help: "Number of Inodes", + valueType: prometheus.GaugeValue, + extraLabels: []string{"device"}, + getValues: func(s *info.ContainerStats) metricValues { + return fsValues(s.Filesystem, func(fs *info.FsStats) float64 { + return float64(fs.Inodes) + }) + }, }, { name: "container_fs_limit_bytes", help: "Number of bytes that can be consumed by the container on this filesystem.", diff --git a/vendor/github.com/google/cadvisor/utils/cloudinfo/gce.go b/vendor/github.com/google/cadvisor/utils/cloudinfo/gce.go index 4c2e330dfeac..b525451c05d9 100644 --- a/vendor/github.com/google/cadvisor/utils/cloudinfo/gce.go +++ b/vendor/github.com/google/cadvisor/utils/cloudinfo/gce.go @@ -20,8 +20,8 @@ import ( info "github.com/google/cadvisor/info/v1" + "cloud.google.com/go/compute/metadata" "github.com/golang/glog" - "google.golang.org/cloud/compute/metadata" ) const ( diff --git 
a/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md b/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md index 32578f01a30f..e5894c6429da 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md +++ b/vendor/github.com/opencontainers/runc/libcontainer/SPEC.md @@ -71,7 +71,6 @@ that are required for executing a container's process. | /dev/tty | 0666 | rwm | | /dev/random | 0666 | rwm | | /dev/urandom | 0666 | rwm | -| /dev/fuse | 0666 | rwm | **ptmx** diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go index 274ab47dd83a..35fc8eb961d1 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go @@ -37,7 +37,7 @@ type Manager interface { // restore the object later. GetPaths() map[string]string - // Set the cgroup as configured. + // Sets the cgroup as configured. Set(container *configs.Config) error } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go index ed46561a251f..9692e4fb233c 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/apply_raw.go @@ -104,6 +104,8 @@ func (m *Manager) Apply(pid int) (err error) { if m.Cgroups == nil { return nil } + m.mu.Lock() + defer m.mu.Unlock() var c = m.Cgroups @@ -128,8 +130,6 @@ func (m *Manager) Apply(pid int) (err error) { return cgroups.EnterPid(m.Paths, pid) } - m.mu.Lock() - defer m.mu.Unlock() paths := make(map[string]string) for _, sys := range subsystems { if err := sys.Apply(d); err != nil { @@ -195,18 +195,10 @@ func (m *Manager) Set(container *configs.Config) error { if m.Cgroups.Paths != nil { return nil } - for _, sys := range subsystems { - // Generate fake cgroup data. - d, err := getCgroupData(container.Cgroups, -1) - if err != nil { - return err - } - // Get the path, but don't error out if the cgroup wasn't found. 
- path, err := d.path(sys.Name()) - if err != nil && !cgroups.IsNotFound(err) { - return err - } + paths := m.GetPaths() + for _, sys := range subsystems { + path := paths[sys.Name()] if err := sys.Set(path, container.Cgroups); err != nil { return err } @@ -223,14 +215,8 @@ func (m *Manager) Set(container *configs.Config) error { // Freeze toggles the container's freezer cgroup depending on the state // provided func (m *Manager) Freeze(state configs.FreezerState) error { - d, err := getCgroupData(m.Cgroups, 0) - if err != nil { - return err - } - dir, err := d.path("freezer") - if err != nil { - return err - } + paths := m.GetPaths() + dir := paths["freezer"] prevState := m.Cgroups.Resources.Freezer m.Cgroups.Resources.Freezer = state freezer, err := subsystems.Get("freezer") @@ -246,28 +232,13 @@ func (m *Manager) Freeze(state configs.FreezerState) error { } func (m *Manager) GetPids() ([]int, error) { - dir, err := getCgroupPath(m.Cgroups) - if err != nil { - return nil, err - } - return cgroups.GetPids(dir) + paths := m.GetPaths() + return cgroups.GetPids(paths["devices"]) } func (m *Manager) GetAllPids() ([]int, error) { - dir, err := getCgroupPath(m.Cgroups) - if err != nil { - return nil, err - } - return cgroups.GetAllPids(dir) -} - -func getCgroupPath(c *configs.Cgroup) (string, error) { - d, err := getCgroupData(c, 0) - if err != nil { - return "", err - } - - return d.path("devices") + paths := m.GetPaths() + return cgroups.GetAllPids(paths["devices"]) } func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go index a4ef28a60f87..7cd506a8ec26 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs/cpu.go @@ -22,10 +22,48 @@ func (s *CpuGroup) Name() string { func (s *CpuGroup) Apply(d *cgroupData) error { // We always want to join the cpu group, to allow fair cpu scheduling // on a container basis - _, err := d.join("cpu") + path, err := d.path("cpu") if err != nil && !cgroups.IsNotFound(err) { return err } + return s.ApplyDir(path, d.config, d.pid) +} + +func (s *CpuGroup) ApplyDir(path string, cgroup *configs.Cgroup, pid int) error { + // This might happen if we have no cpu cgroup mounted. + // Just do nothing and don't fail. + if path == "" { + return nil + } + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + // We should set the real-Time group scheduling settings before moving + // in the process because if the process is already in SCHED_RR mode + // and no RT bandwidth is set, adding it will fail. 
+ if err := s.SetRtSched(path, cgroup); err != nil { + return err + } + // because we are not using d.join we need to place the pid into the procs file + // unlike the other subsystems + if err := cgroups.WriteCgroupProc(path, pid); err != nil { + return err + } + + return nil +} + +func (s *CpuGroup) SetRtSched(path string, cgroup *configs.Cgroup) error { + if cgroup.Resources.CpuRtPeriod != 0 { + if err := writeFile(path, "cpu.rt_period_us", strconv.FormatInt(cgroup.Resources.CpuRtPeriod, 10)); err != nil { + return err + } + } + if cgroup.Resources.CpuRtRuntime != 0 { + if err := writeFile(path, "cpu.rt_runtime_us", strconv.FormatInt(cgroup.Resources.CpuRtRuntime, 10)); err != nil { + return err + } + } return nil } @@ -45,15 +83,8 @@ func (s *CpuGroup) Set(path string, cgroup *configs.Cgroup) error { return err } } - if cgroup.Resources.CpuRtPeriod != 0 { - if err := writeFile(path, "cpu.rt_period_us", strconv.FormatInt(cgroup.Resources.CpuRtPeriod, 10)); err != nil { - return err - } - } - if cgroup.Resources.CpuRtRuntime != 0 { - if err := writeFile(path, "cpu.rt_runtime_us", strconv.FormatInt(cgroup.Resources.CpuRtRuntime, 10)); err != nil { - return err - } + if err := s.SetRtSched(path, cgroup); err != nil { + return err } return nil diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go index 957db3356b4f..8946dd5959e4 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go @@ -40,10 +40,6 @@ func FindCgroupMountpoint(subsystem string) (string, error) { txt := scanner.Text() fields := strings.Split(txt, " ") for _, opt := range strings.Split(fields[len(fields)-1], ",") { - // temporary change to allow containerized cadvisor to start on RHEL (where host and container have reversed cpu,cpuacct) - if strings.Contains(fields[4], ",") { - continue - } if opt == subsystem { return fields[4], nil } @@ -71,10 +67,6 @@ func FindCgroupMountpointAndRoot(subsystem string) (string, string, error) { txt := scanner.Text() fields := strings.Split(txt, " ") for _, opt := range strings.Split(fields[len(fields)-1], ",") { - // temporary change to allow containerized cadvisor to start on RHEL (where host and container have reversed cpu,cpuacct) - if strings.Contains(fields[4], ",") { - continue - } if opt == subsystem { return fields[4], fields[3], nil } @@ -107,12 +99,6 @@ func FindCgroupMountpointDir() (string, error) { for scanner.Scan() { text := scanner.Text() fields := strings.Split(text, " ") - - // temporary change to allow containerized cadvisor to start on RHEL (where host and container have reversed cpu,cpuacct) - if strings.Contains(fields[4], ",") { - continue - } - // Safe as mountinfo encodes mountpoints with spaces as \040. 
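The next hunks in this file give getCgroupMountsHelper and GetCgroupMounts an all flag; the cadvisor caller above already passes true. A minimal sketch of the new call as it looks after this change:

package main

import (
	"fmt"
	"log"

	"github.com/opencontainers/runc/libcontainer/cgroups"
)

func main() {
	// true keeps collecting duplicate subsystem mounts; false stops once every
	// subsystem has been seen, matching the old single-result behaviour.
	mounts, err := cgroups.GetCgroupMounts(true)
	if err != nil {
		log.Fatalf("GetCgroupMounts: %v", err)
	}
	for _, m := range mounts {
		fmt.Printf("%s -> %v (root %s)\n", m.Mountpoint, m.Subsystems, m.Root)
	}
}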
index := strings.Index(text, " - ") postSeparatorFields := strings.Fields(text[index+3:]) @@ -153,7 +139,7 @@ func (m Mount) GetThisCgroupDir(cgroups map[string]string) (string, error) { return getControllerPath(m.Subsystems[0], cgroups) } -func getCgroupMountsHelper(ss map[string]bool, mi io.Reader) ([]Mount, error) { +func getCgroupMountsHelper(ss map[string]bool, mi io.Reader, all bool) ([]Mount, error) { res := make([]Mount, 0, len(ss)) scanner := bufio.NewScanner(mi) numFound := 0 @@ -167,10 +153,6 @@ func getCgroupMountsHelper(ss map[string]bool, mi io.Reader) ([]Mount, error) { continue } fields := strings.Split(txt, " ") - // temporary change to allow containerized cadvisor to start on RHEL (where host and container have reversed cpu,cpuacct) - if strings.Contains(fields[4], ",") { - continue - } m := Mount{ Mountpoint: fields[4], Root: fields[3], @@ -184,7 +166,9 @@ func getCgroupMountsHelper(ss map[string]bool, mi io.Reader) ([]Mount, error) { } else { m.Subsystems = append(m.Subsystems, opt) } - numFound++ + if !all { + numFound++ + } } res = append(res, m) } @@ -194,23 +178,25 @@ func getCgroupMountsHelper(ss map[string]bool, mi io.Reader) ([]Mount, error) { return res, nil } -func GetCgroupMounts() ([]Mount, error) { +// GetCgroupMounts returns the mounts for the cgroup subsystems. +// all indicates whether to return just the first instance or all the mounts. +func GetCgroupMounts(all bool) ([]Mount, error) { f, err := os.Open("/proc/self/mountinfo") if err != nil { return nil, err } defer f.Close() - all, err := ParseCgroupFile("/proc/self/cgroup") + allSubsystems, err := ParseCgroupFile("/proc/self/cgroup") if err != nil { return nil, err } allMap := make(map[string]bool) - for s := range all { + for s := range allSubsystems { allMap[s] = true } - return getCgroupMountsHelper(allMap, f) + return getCgroupMountsHelper(allMap, f, all) } // GetAllSubsystems returns all the cgroup subsystems supported by the kernel diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go index bd6f69b82f4d..94b38879ed62 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go @@ -120,5 +120,5 @@ type Resources struct { NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"` // Set class identifier for container's network packets - NetClsClassid uint32 `json:"net_cls_classid"` + NetClsClassid uint32 `json:"net_cls_classid_u"` } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go index 3c38191b3542..a56d12bdb97e 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go @@ -300,29 +300,38 @@ func (c Command) Run(s HookState) error { if err != nil { return err } + var stdout, stderr bytes.Buffer cmd := exec.Cmd{ - Path: c.Path, - Args: c.Args, - Env: c.Env, - Stdin: bytes.NewReader(b), + Path: c.Path, + Args: c.Args, + Env: c.Env, + Stdin: bytes.NewReader(b), + Stdout: &stdout, + Stderr: &stderr, + } + if err := cmd.Start(); err != nil { + return err } errC := make(chan error, 1) go func() { - out, err := cmd.CombinedOutput() + err := cmd.Wait() if err != nil { - err = fmt.Errorf("%s: %s", err, out) + err = fmt.Errorf("error running hook: %v, stdout: %s, stderr: %s", err, stdout.String(), 
stderr.String()) } errC <- err }() + var timerCh <-chan time.Time if c.Timeout != nil { - select { - case err := <-errC: - return err - case <-time.After(*c.Timeout): - cmd.Process.Kill() - cmd.Wait() - return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds()) - } + timer := time.NewTimer(*c.Timeout) + defer timer.Stop() + timerCh = timer.C + } + select { + case err := <-errC: + return err + case <-timerCh: + cmd.Process.Kill() + cmd.Wait() + return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds()) } - return <-errC } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go index ba1f437f3bbb..4d348d217ec9 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go @@ -107,19 +107,5 @@ var ( Permissions: "rwm", }, }, DefaultSimpleDevices...) - DefaultAutoCreatedDevices = append([]*Device{ - { - // /dev/fuse is created but not allowed. - // This is to allow java to work. Because java - // Insists on there being a /dev/fuse - // https://github.com/docker/docker/issues/514 - // https://github.com/docker/docker/issues/2393 - // - Path: "/dev/fuse", - Type: 'c', - Major: 10, - Minor: 229, - Permissions: "rwm", - }, - }, DefaultSimpleDevices...) + DefaultAutoCreatedDevices = append([]*Device{}, DefaultSimpleDevices...) ) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container.go b/vendor/github.com/opencontainers/runc/libcontainer/container.go index 1a71179c96dd..6844fbc7a8f5 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/container.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/container.go @@ -75,8 +75,8 @@ type BaseContainer interface { // Returns the current status of the container. // // errors: - // ContainerDestroyed - Container no longer exists, - // SystemError - System error. + // ContainerNotExists - Container no longer exists, + // Systemerror - System error. Status() (Status, error) // State returns the current container's state information. @@ -91,8 +91,8 @@ type BaseContainer interface { // Returns the PIDs inside this container. The PIDs are in the namespace of the calling process. // // errors: - // ContainerDestroyed - Container no longer exists, - // SystemError - System error. + // ContainerNotExists - Container no longer exists, + // Systemerror - System error. // // Some of the returned PIDs may no longer refer to processes in the Container, unless // the Container state is PAUSED in which case every PID in the slice is valid. @@ -101,8 +101,8 @@ type BaseContainer interface { // Returns statistics for the container. // // errors: - // ContainerDestroyed - Container no longer exists, - // SystemError - System error. + // ContainerNotExists - Container no longer exists, + // Systemerror - System error. Stats() (*Stats, error) // Set resources of container as configured @@ -117,7 +117,7 @@ type BaseContainer interface { // start. You can track process lifecycle with passed Process structure. // // errors: - // ContainerDestroyed - Container no longer exists, + // ContainerNotExists - Container no longer exists, // ConfigInvalid - config is invalid, // ContainerPaused - Container is paused, // SystemError - System error. @@ -128,7 +128,7 @@ type BaseContainer interface { // opens the fifo after start returns. 
// // errors: - // ContainerDestroyed - Container no longer exists, + // ContainerNotExists - Container no longer exists, // ConfigInvalid - config is invalid, // ContainerPaused - Container is paused, // SystemError - System error. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go index 70cbc6359f38..29c8b3437be3 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/container_linux.go @@ -35,7 +35,6 @@ type linuxContainer struct { root string config *configs.Config cgroupManager cgroups.Manager - initPath string initArgs []string initProcess parentProcess initProcessStartTime string @@ -86,13 +85,14 @@ type Container interface { // Systemerror - System error. Restore(process *Process, criuOpts *CriuOpts) error - // If the Container state is RUNNING, sets the Container state to PAUSING and pauses + // If the Container state is RUNNING or CREATED, sets the Container state to PAUSING and pauses // the execution of any user processes. Asynchronously, when the container finished being paused the // state is changed to PAUSED. // If the Container state is PAUSED, do nothing. // // errors: - // ContainerDestroyed - Container no longer exists, + // ContainerNotExists - Container no longer exists, + // ContainerNotRunning - Container not running or created, // Systemerror - System error. Pause() error @@ -101,7 +101,8 @@ type Container interface { // If the Container state is RUNNING, do nothing. // // errors: - // ContainerDestroyed - Container no longer exists, + // ContainerNotExists - Container no longer exists, + // ContainerNotPaused - Container is not paused, // Systemerror - System error. Resume() error @@ -308,10 +309,7 @@ func (c *linuxContainer) newParentProcess(p *Process, doInit bool) (parentProces } func (c *linuxContainer) commandTemplate(p *Process, childPipe, rootDir *os.File) (*exec.Cmd, error) { - cmd := &exec.Cmd{ - Path: c.initPath, - Args: c.initArgs, - } + cmd := exec.Command(c.initArgs[0], c.initArgs[1:]...) cmd.Stdin = p.Stdin cmd.Stdout = p.Stdout cmd.Stderr = p.Stderr @@ -447,7 +445,7 @@ func (c *linuxContainer) Pause() error { c: c, }) } - return newGenericError(fmt.Errorf("container not running: %s", status), ContainerNotRunning) + return newGenericError(fmt.Errorf("container not running or created: %s", status), ContainerNotRunning) } func (c *linuxContainer) Resume() error { @@ -1049,6 +1047,8 @@ func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Proc }); err != nil { return err } + // create a timestamp indicating when the restored checkpoint was started + c.created = time.Now().UTC() if _, err := c.updateState(r); err != nil { return err } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go index 6cce46e0dd64..0abc2c5a24a0 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "os" - "os/exec" "path/filepath" "regexp" "runtime/debug" @@ -33,32 +32,9 @@ var ( ) // InitArgs returns an options func to configure a LinuxFactory with the -// provided init arguments. +// provided init binary path and arguments. 
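Continuing just below, InitPath is dropped from the factory and InitArgs now carries the init binary path itself, with /proc/self/exe init as the default. An illustrative sketch of configuring a factory under the new option (the state directory is a placeholder, and passing Cgroupfs simply restates the factory default):

package main

import (
	"log"
	"os"

	"github.com/opencontainers/runc/libcontainer"
)

func main() {
	// os.Args[0] re-executes the current binary for the container init step;
	// omitting InitArgs entirely now falls back to "/proc/self/exe init".
	factory, err := libcontainer.New(
		"/var/lib/example-containers",
		libcontainer.InitArgs(os.Args[0], "init"),
		libcontainer.Cgroupfs,
	)
	if err != nil {
		log.Fatalf("libcontainer.New: %v", err)
	}
	log.Printf("factory ready: %T", factory)
}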
func InitArgs(args ...string) func(*LinuxFactory) error { return func(l *LinuxFactory) error { - name := args[0] - if filepath.Base(name) == name { - if lp, err := exec.LookPath(name); err == nil { - name = lp - } - } else { - abs, err := filepath.Abs(name) - if err != nil { - return err - } - name = abs - } - l.InitPath = "/proc/self/exe" - l.InitArgs = append([]string{name}, args[1:]...) - return nil - } -} - -// InitPath returns an options func to configure a LinuxFactory with the -// provided absolute path to the init binary and arguements. -func InitPath(path string, args ...string) func(*LinuxFactory) error { - return func(l *LinuxFactory) error { - l.InitPath = path l.InitArgs = args return nil } @@ -122,10 +98,10 @@ func New(root string, options ...func(*LinuxFactory) error) (Factory, error) { } l := &LinuxFactory{ Root: root, + InitArgs: []string{"/proc/self/exe", "init"}, Validator: validate.New(), CriuPath: "criu", } - InitArgs(os.Args[0], "init")(l) Cgroupfs(l) for _, opt := range options { if err := opt(l); err != nil { @@ -140,9 +116,6 @@ type LinuxFactory struct { // Root directory for the factory to store state. Root string - // InitPath is the absolute path to the init binary. - InitPath string - // InitArgs are arguments for calling the init responsibilities for spawning // a container. InitArgs []string @@ -202,7 +175,6 @@ func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, err id: id, root: containerRoot, config: config, - initPath: l.InitPath, initArgs: l.InitArgs, criuPath: l.CriuPath, cgroupManager: l.NewCgroupsManager(config.Cgroups, nil), @@ -216,7 +188,7 @@ func (l *LinuxFactory) Load(id string) (Container, error) { return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid) } containerRoot := filepath.Join(l.Root, id) - state, err := l.loadState(containerRoot) + state, err := l.loadState(containerRoot, id) if err != nil { return nil, err } @@ -230,7 +202,6 @@ func (l *LinuxFactory) Load(id string) (Container, error) { initProcessStartTime: state.InitProcessStartTime, id: id, config: &state.Config, - initPath: l.InitPath, initArgs: l.InitArgs, criuPath: l.CriuPath, cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths), @@ -302,11 +273,11 @@ func (l *LinuxFactory) StartInitialization() (err error) { return i.Init() } -func (l *LinuxFactory) loadState(root string) (*State, error) { +func (l *LinuxFactory) loadState(root, id string) (*State, error) { f, err := os.Open(filepath.Join(root, stateFilename)) if err != nil { if os.IsNotExist(err) { - return nil, newGenericError(err, ContainerNotExists) + return nil, newGenericError(fmt.Errorf("container %q does not exists", id), ContainerNotExists) } return nil, newGenericError(err, SystemError) } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go b/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go index 9c3d32492ba7..de37715c9284 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/generic_error.go @@ -67,9 +67,6 @@ func newSystemErrorWithCause(err error, cause string) Error { // stack frames skipped. This is only to be called by the other functions for // formatting the error. 
func createSystemError(err error, cause string) Error { - if le, ok := err.(Error); ok { - return le - } gerr := &genericError{ Timestamp: time.Now(), Err: err, diff --git a/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go index 01ff0d133dff..b1e6762ecdf3 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/init_linux.go @@ -144,7 +144,7 @@ func finalizeNamespace(config *initConfig) error { } if config.Cwd != "" { if err := syscall.Chdir(config.Cwd); err != nil { - return err + return fmt.Errorf("chdir to cwd (%q) set in config.json failed: %v", config.Cwd, err) } } return nil diff --git a/vendor/github.com/opencontainers/runc/libcontainer/label/label_selinux.go b/vendor/github.com/opencontainers/runc/libcontainer/label/label_selinux.go index 4493bda77418..1d9d78a3902d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/label/label_selinux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/label/label_selinux.go @@ -129,7 +129,7 @@ func Relabel(path string, fileLabel string, shared bool) error { exclude_paths := map[string]bool{"/": true, "/usr": true, "/etc": true} if exclude_paths[path] { - return fmt.Errorf("Relabeling of %s is not allowed", path) + return fmt.Errorf("SELinux relabeling of %s is not allowed", path) } if shared { @@ -137,7 +137,10 @@ func Relabel(path string, fileLabel string, shared bool) error { c["level"] = "s0" fileLabel = c.Get() } - return selinux.Chcon(path, fileLabel, true) + if err := selinux.Chcon(path, fileLabel, true); err != nil { + return fmt.Errorf("SELinux relabeling of %s is not allowed: %q", path, err) + } + return nil } // GetPidLabel will return the label of the process running with the specified pid diff --git a/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go index 33db39239d17..5b81317fd711 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/process_linux.go @@ -32,7 +32,7 @@ type parentProcess interface { // wait waits on the process returning the process state. wait() (*os.ProcessState, error) - // startTime return's the process start time. + // startTime returns the process start time. 
startTime() (string, error) signal(os.Signal) error @@ -356,7 +356,7 @@ loop: } } if !sentRun { - return newSystemErrorWithCause(ierr, "container init failed") + return newSystemErrorWithCause(ierr, "container init") } if p.config.Config.Namespaces.Contains(configs.NEWNS) && !sentResume { return newSystemError(fmt.Errorf("could not synchronise after executing prestart hooks with container process")) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go index 943b2fc09985..67b7a2754e57 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go @@ -50,7 +50,7 @@ func setupRootfs(config *configs.Config, console *linuxConsole, pipe io.ReadWrit } } if err := mountToRootfs(m, config.Rootfs, config.MountLabel); err != nil { - return newSystemErrorWithCausef(err, "mounting %q to rootfs %q", m.Destination, config.Rootfs) + return newSystemErrorWithCausef(err, "mounting %q to rootfs %q at %q", m.Source, config.Rootfs, m.Destination) } for _, postcmd := range m.PostmountCmds { @@ -270,7 +270,7 @@ func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error { } func getCgroupMounts(m *configs.Mount) ([]*configs.Mount, error) { - mounts, err := cgroups.GetCgroupMounts() + mounts, err := cgroups.GetCgroupMounts(false) if err != nil { return nil, err } @@ -320,6 +320,8 @@ func checkMountDestination(rootfs, dest string) error { "/proc/diskstats", "/proc/meminfo", "/proc/stat", + "/proc/swaps", + "/proc/uptime", "/proc/net/dev", } for _, valid := range validDestinations { diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go index c99006518907..bb44d895ce96 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go @@ -8,7 +8,7 @@ import ( // Setuid sets the uid of the calling thread to the specified uid. 
func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0) if e1 != 0 { err = e1 } diff --git a/vendor/github.com/openshift/source-to-image/pkg/build/strategies/sti/sti.go b/vendor/github.com/openshift/source-to-image/pkg/build/strategies/sti/sti.go index 018e8c50c2e0..cabc919eca24 100644 --- a/vendor/github.com/openshift/source-to-image/pkg/build/strategies/sti/sti.go +++ b/vendor/github.com/openshift/source-to-image/pkg/build/strategies/sti/sti.go @@ -202,11 +202,12 @@ func (builder *STI) Build(config *api.Config) (*api.Result, error) { glog.V(1).Infof("Running %q in %q", api.Assemble, config.Tag) } if err := builder.scripts.Execute(api.Assemble, config.AssembleUser, config); err != nil { + builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonAssembleFailed, utilstatus.ReasonMessageAssembleFailed) switch e := err.(type) { case errors.ContainerError: if !isMissingRequirements(e.Output) { - builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonAssembleFailed, utilstatus.ReasonMessageAssembleFailed) + builder.result.BuildInfo.FailureReason = utilstatus.NewFailureReason(utilstatus.ReasonUnmetS2IDependencies, utilstatus.ReasonMessageUnmetS2IDependencies) return builder.result, err } glog.V(1).Info("Image is missing basic requirements (sh or tar), layered build will be performed") diff --git a/vendor/github.com/openshift/source-to-image/pkg/util/status/build_status.go b/vendor/github.com/openshift/source-to-image/pkg/util/status/build_status.go index aaa432c45cd5..4f9148a69103 100644 --- a/vendor/github.com/openshift/source-to-image/pkg/util/status/build_status.go +++ b/vendor/github.com/openshift/source-to-image/pkg/util/status/build_status.go @@ -83,12 +83,21 @@ const ( ReasonMessageInstallScriptsFailed api.StepFailureMessage = "Failed to install specified scripts" // ReasonGenericS2IBuildFailed is the reason associated with a broad range of - // failures. + // failure. ReasonGenericS2IBuildFailed api.StepFailureReason = "GenericS2IBuildFailed" - // ReasonMessageGenericS2iBuildFailed is the message associated with a broad - // range of failures. + // ReasonMessageGenericS2iBuildFailed is the message with a broad range of + // failure. ReasonMessageGenericS2iBuildFailed api.StepFailureMessage = "Generic S2I Build failure - check S2I logs for details" + // ReasonUnmetS2IDependencies is the failure reason associated with a + // builder image that doesn't contain required dependencies for building the + // app. + ReasonUnmetS2IDependencies api.StepFailureReason = "UnmetBuilderImageDependencies" + // ReasonMessageUnmetS2IDependencies is the message associated with a + // builder image that doesn't contain required dependencies for building the + // app. + ReasonMessageUnmetS2IDependencies api.StepFailureMessage = "Builder image is missing mandatory dependencies (sh and tar)" + // ReasonTarSourceFailed is the failure reason associated with a failure to // tar the current source. 
ReasonTarSourceFailed api.StepFailureReason = "TarSourceFailed" diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml index a035125c3588..fa139db22519 100644 --- a/vendor/golang.org/x/oauth2/.travis.yml +++ b/vendor/golang.org/x/oauth2/.travis.yml @@ -1,8 +1,7 @@ language: go go: - - 1.3 - - 1.4 + - tip install: - export GOPATH="$HOME/gopath" diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 0d5141733f57..1643c08ef876 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -1,6 +1,7 @@ # OAuth2 for Go [![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) +[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) oauth2 package contains a client implementation for OAuth 2.0 spec. diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go index 4a554cb9bf69..8962c49d1deb 100644 --- a/vendor/golang.org/x/oauth2/client_appengine.go +++ b/vendor/golang.org/x/oauth2/client_appengine.go @@ -1,8 +1,8 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build appengine appenginevm +// +build appengine // App Engine hooks. diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go index 65dc347314d6..dc993efb5e10 100644 --- a/vendor/golang.org/x/oauth2/google/appengine.go +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -14,6 +14,9 @@ import ( "golang.org/x/oauth2" ) +// Set at init time by appenginevm_hook.go. If true, we are on App Engine Managed VMs. +var appengineVM bool + // Set at init time by appengine_hook.go. If nil, we're not on App Engine. var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go index 2f9b15432fa8..4f42c8b34354 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_hook.go +++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go @@ -1,8 +1,8 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build appengine appenginevm +// +build appengine package google diff --git a/vendor/golang.org/x/oauth2/google/appenginevm_hook.go b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go new file mode 100644 index 000000000000..633611cc3a01 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appenginevm_hook.go @@ -0,0 +1,14 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineVM = true + appengineTokenFunc = appengine.AccessToken +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index 78f8089853f3..565d731c45b0 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -1,4 +1,4 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -14,10 +14,10 @@ import ( "path/filepath" "runtime" + "cloud.google.com/go/compute/metadata" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/jwt" - "google.golang.org/cloud/compute/metadata" ) // DefaultClient returns an HTTP Client that uses the @@ -50,7 +50,8 @@ func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { // On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. // On other systems, $HOME/.config/gcloud/application_default_credentials.json. // 3. On Google App Engine it uses the appengine.AccessToken function. -// 4. On Google Compute Engine, it fetches credentials from the metadata server. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. // (In this final case any provided scopes are ignored.) // // For more details, see: @@ -84,7 +85,7 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc } // Third, if we're on Google App Engine use those credentials. - if appengineTokenFunc != nil { + if appengineTokenFunc != nil && !appengineVM { return AppEngineTokenSource(ctx, scope...), nil } diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index 2077d9866faf..4f7352762b34 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -21,9 +21,9 @@ import ( "strings" "time" + "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/jwt" - "google.golang.org/cloud/compute/metadata" ) // Endpoint is Google's OAuth 2.0 endpoint. @@ -37,9 +37,10 @@ const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" // ConfigFromJSON uses a Google Developers Console client_credentials.json // file to construct a config. -// client_credentials.json can be downloadable from https://console.developers.google.com, -// under "APIs & Auth" > "Credentials". Download the Web application credentials in the -// JSON format and provide the contents of the file as jsonKey. +// client_credentials.json can be downloaded from +// https://console.developers.google.com, under "Credentials". Download the Web +// application credentials in the JSON format and provide the contents of the +// file as jsonKey. 
func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { type cred struct { ClientID string `json:"client_id"` @@ -81,22 +82,29 @@ func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { // JWTConfigFromJSON uses a Google Developers service account JSON key file to read // the credentials that authorize and authenticate the requests. -// Create a service account on "Credentials" page under "APIs & Auth" for your -// project at https://console.developers.google.com to download a JSON key file. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { var key struct { - Email string `json:"client_email"` - PrivateKey string `json:"private_key"` + Email string `json:"client_email"` + PrivateKey string `json:"private_key"` + PrivateKeyID string `json:"private_key_id"` + TokenURL string `json:"token_uri"` } if err := json.Unmarshal(jsonKey, &key); err != nil { return nil, err } - return &jwt.Config{ - Email: key.Email, - PrivateKey: []byte(key.PrivateKey), - Scopes: scope, - TokenURL: JWTTokenURL, - }, nil + config := &jwt.Config{ + Email: key.Email, + PrivateKey: []byte(key.PrivateKey), + PrivateKeyID: key.PrivateKeyID, + Scopes: scope, + TokenURL: key.TokenURL, + } + if config.TokenURL == "" { + config.TokenURL = JWTTokenURL + } + return config, nil } // ComputeTokenSource returns a token source that fetches access tokens diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 000000000000..b0fdb3a888ac --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go index 01ba0ecb0084..d29a3bb9bbc5 100644 --- a/vendor/golang.org/x/oauth2/google/sdk.go +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -1,4 +1,4 @@ -// Copyright 2015 The oauth2 Authors. All rights reserved. +// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index dc8ebfc4f76d..fbe1028d64e5 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index ea6716c98c14..18328a0dcf2e 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
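The google package picks up two related changes here: JWTConfigFromJSON now reads private_key_id and token_uri from the key file (falling back to the hard-coded JWTTokenURL when token_uri is absent), and the new JWTAccessTokenSourceFromJSON builds a token source that signs a JWT locally instead of exchanging it at a token endpoint. A minimal caller-side sketch, assuming a service-account key on disk; the file path and audience URL are placeholders, not values from this change:

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"

        "golang.org/x/oauth2/google"
    )

    func main() {
        // Hypothetical path to a Google service-account JSON key.
        jsonKey, err := ioutil.ReadFile("/path/to/service-account.json")
        if err != nil {
            log.Fatal(err)
        }
        // The audience is typically the URL of the service the token will be presented to.
        // No OAuth2 token endpoint is contacted; the source mints a self-signed JWT and
        // returns it directly as the bearer token.
        ts, err := google.JWTAccessTokenSourceFromJSON(jsonKey, "https://example.googleapis.com/")
        if err != nil {
            log.Fatal(err)
        }
        tok, err := ts.Token()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(tok.TokenType, tok.Expiry) // "Bearer", roughly one hour out
    }

Because the source is wrapped in oauth2.ReuseTokenSource, the JWT is only re-signed once the hour-long expiry lapses.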
@@ -91,24 +91,36 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error { var brokenAuthHeaderProviders = []string{ "https://accounts.google.com/", - "https://www.googleapis.com/", - "https://github.com/", - "https://api.instagram.com/", - "https://www.douban.com/", "https://api.dropbox.com/", + "https://api.dropboxapi.com/", + "https://api.instagram.com/", + "https://api.netatmo.net/", + "https://api.odnoklassniki.ru/", + "https://api.pushbullet.com/", "https://api.soundcloud.com/", - "https://www.linkedin.com/", "https://api.twitch.tv/", - "https://oauth.vk.com/", - "https://api.odnoklassniki.ru/", + "https://app.box.com/", "https://connect.stripe.com/", - "https://api.pushbullet.com/", + "https://login.microsoftonline.com/", + "https://login.salesforce.com/", "https://oauth.sandbox.trainingpeaks.com/", "https://oauth.trainingpeaks.com/", - "https://www.strava.com/oauth/", - "https://app.box.com/", + "https://oauth.vk.com/", + "https://openapi.baidu.com/", + "https://slack.com/", "https://test-sandbox.auth.corp.google.com", + "https://test.salesforce.com/", "https://user.gini.net/", + "https://www.douban.com/", + "https://www.googleapis.com/", + "https://www.linkedin.com/", + "https://www.strava.com/oauth/", + "https://www.wunderlist.com/oauth/", + "https://api.patreon.com/", +} + +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL) } // providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL @@ -134,23 +146,23 @@ func providerAuthHeaderWorks(tokenURL string) bool { return true } -func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) { hc, err := ContextClient(ctx) if err != nil { return nil, err } - v.Set("client_id", ClientID) - bustedAuth := !providerAuthHeaderWorks(TokenURL) - if bustedAuth && ClientSecret != "" { - v.Set("client_secret", ClientSecret) + v.Set("client_id", clientID) + bustedAuth := !providerAuthHeaderWorks(tokenURL) + if bustedAuth && clientSecret != "" { + v.Set("client_secret", clientSecret) } - req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) + req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") if !bustedAuth { - req.SetBasicAuth(ClientID, ClientSecret) + req.SetBasicAuth(clientID, clientSecret) } r, err := hc.Do(req) if err != nil { diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go index 521e7b49e75b..f1f173e345db 100644 --- a/vendor/golang.org/x/oauth2/internal/transport.go +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
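The internal/token.go hunk above does two things: it expands and alphabetizes the list of token endpoints that cannot handle HTTP Basic authentication, adding RegisterBrokenAuthHeaderProvider so the list can be extended at runtime, and it renames RetrieveToken's parameters to the idiomatic lower-case clientID/clientSecret/tokenURL. The practical effect of the list is where the client credentials travel. A standalone sketch of that rule (not the vendored code; the endpoint and credentials below are placeholders):

    package main

    import (
        "fmt"
        "net/http"
        "net/url"
        "strings"
    )

    // buildTokenRequest mirrors the branch RetrieveToken takes: providers known to
    // reject Basic auth get client_secret as a form field, everyone else gets an
    // Authorization header. client_id is always sent in the form.
    func buildTokenRequest(tokenURL, clientID, clientSecret string, authHeaderWorks bool) (*http.Request, error) {
        v := url.Values{"grant_type": {"authorization_code"}, "code": {"placeholder-code"}}
        v.Set("client_id", clientID)
        if !authHeaderWorks && clientSecret != "" {
            v.Set("client_secret", clientSecret)
        }
        req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
        if err != nil {
            return nil, err
        }
        req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
        if authHeaderWorks {
            req.SetBasicAuth(clientID, clientSecret)
        }
        return req, nil
    }

    func main() {
        req, _ := buildTokenRequest("https://broken.example/token", "my-id", "my-secret", false)
        fmt.Println(req.Header.Get("Authorization") == "") // true: credentials ride in the request body
    }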
@@ -33,6 +33,11 @@ func RegisterContextClientFunc(fn ContextClientFunc) { } func ContextClient(ctx context.Context) (*http.Client, error) { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + } for _, fn := range contextClientFuncs { c, err := fn(ctx) if err != nil { @@ -42,9 +47,6 @@ func ContextClient(ctx context.Context) (*http.Client, error) { return c, nil } } - if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { - return hc, nil - } return http.DefaultClient, nil } diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go index 396b3fac827d..8bcecb46ba5f 100644 --- a/vendor/golang.org/x/oauth2/jws/jws.go +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -1,9 +1,17 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package jws provides encoding and decoding utilities for -// signed JWS messages. +// Package jws provides a partial implementation +// of JSON Web Signature encoding and decoding. +// It exists to support the golang.org/x/oauth2 package. +// +// See RFC 7515. +// +// Deprecated: this package is not intended for public use and might be +// removed in the future. It exists for internal use only. +// Please switch to another JWS package or copy this package into your own +// source tree. package jws import ( @@ -27,8 +35,8 @@ type ClaimSet struct { Iss string `json:"iss"` // email address of the client_id of the application making the access token request Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). - Exp int64 `json:"exp"` // the expiration time of the assertion - Iat int64 `json:"iat"` // the time the assertion was issued. + Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) + Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) Typ string `json:"typ,omitempty"` // token type (Optional). // Email for which the application is requesting delegated access (Optional). @@ -41,23 +49,22 @@ type ClaimSet struct { // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 // This array is marshalled using custom code (see (c *ClaimSet) encode()). PrivateClaims map[string]interface{} `json:"-"` - - exp time.Time - iat time.Time } func (c *ClaimSet) encode() (string, error) { - if c.exp.IsZero() || c.iat.IsZero() { - // Reverting time back for machines whose time is not perfectly in sync. - // If client machine's time is in the future according - // to Google servers, an access token will not be issued. - now := time.Now().Add(-10 * time.Second) - c.iat = now - c.exp = now.Add(time.Hour) + // Reverting time back for machines whose time is not perfectly in sync. + // If client machine's time is in the future according + // to Google servers, an access token will not be issued. 
+ now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) } - - c.Exp = c.exp.Unix() - c.Iat = c.iat.Unix() b, err := json.Marshal(c) if err != nil { @@ -65,7 +72,7 @@ func (c *ClaimSet) encode() (string, error) { } if len(c.PrivateClaims) == 0 { - return base64Encode(b), nil + return base64.RawURLEncoding.EncodeToString(b), nil } // Marshal private claim set and then append it to b. @@ -83,7 +90,7 @@ func (c *ClaimSet) encode() (string, error) { } b[len(b)-1] = ',' // Replace closing curly brace with a comma. b = append(b, prv[1:]...) // Append private claims. - return base64Encode(b), nil + return base64.RawURLEncoding.EncodeToString(b), nil } // Header represents the header for the signed JWS payloads. @@ -93,6 +100,9 @@ type Header struct { // Represents the token type. Typ string `json:"typ"` + + // The optional hint of which key is being used. + KeyID string `json:"kid,omitempty"` } func (h *Header) encode() (string, error) { @@ -100,7 +110,7 @@ func (h *Header) encode() (string, error) { if err != nil { return "", err } - return base64Encode(b), nil + return base64.RawURLEncoding.EncodeToString(b), nil } // Decode decodes a claim set from a JWS payload. @@ -111,7 +121,7 @@ func Decode(payload string) (*ClaimSet, error) { // TODO(jbd): Provide more context about the error. return nil, errors.New("jws: invalid token received") } - decoded, err := base64Decode(s[1]) + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) if err != nil { return nil, err } @@ -120,8 +130,11 @@ func Decode(payload string) (*ClaimSet, error) { return c, err } -// Encode encodes a signed JWS with provided header and claim set. -func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) { +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { head, err := header.encode() if err != nil { return "", err @@ -131,30 +144,39 @@ func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, err return "", err } ss := fmt.Sprintf("%s.%s", head, cs) - h := sha256.New() - h.Write([]byte(ss)) - b, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil)) + sig, err := sg([]byte(ss)) if err != nil { return "", err } - sig := base64Encode(b) - return fmt.Sprintf("%s.%s", ss, sig), nil + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil } -// base64Encode returns and Base64url encoded version of the input string with any -// trailing "=" stripped. -func base64Encode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. 
+func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) } -// base64Decode decodes the Base64url encoded string -func base64Decode(s string) ([]byte, error) { - // add back missing padding - switch len(s) % 4 { - case 2: - s += "==" - case 3: - s += "=" +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. +func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") } - return base64.URLEncoding.DecodeString(s) + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) } diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go index 205d23ed4387..f4b9523e6e4c 100644 --- a/vendor/golang.org/x/oauth2/jwt/jwt.go +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -46,6 +46,10 @@ type Config struct { // PrivateKey []byte + // PrivateKeyID contains an optional hint indicating which key is being + // used. + PrivateKeyID string + // Subject is the optional user to impersonate. Subject string @@ -54,6 +58,9 @@ type Config struct { // TokenURL is the endpoint required to complete the 2-legged JWT flow. TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration } // TokenSource returns a JWT TokenSource using the configuration @@ -95,6 +102,9 @@ func (js jwtSource) Token() (*oauth2.Token, error) { // to be compatible with legacy OAuth 2.0 providers. claimSet.Prn = subject } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } payload, err := jws.Encode(defaultHeader, claimSet, pk) if err != nil { return nil, err diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index dfcf238d2304..7b06bfe1ef14 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -1,11 +1,11 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package oauth2 provides support for making // OAuth2 authorized and authenticated HTTP requests. // It can additionally grant authorization with Bearer JWT. -package oauth2 +package oauth2 // import "golang.org/x/oauth2" import ( "bytes" @@ -21,10 +21,26 @@ import ( // NoContext is the default context you should supply if not using // your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. 
var NoContext = context.TODO() +// RegisterBrokenAuthHeaderProvider registers an OAuth2 server +// identified by the tokenURL prefix as an OAuth2 implementation +// which doesn't support the HTTP Basic authentication +// scheme to authenticate with the authorization server. +// Once a server is registered, credentials (client_id and client_secret) +// will be passed as query parameters rather than being present +// in the Authorization header. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +func RegisterBrokenAuthHeaderProvider(tokenURL string) { + internal.RegisterBrokenAuthHeaderProvider(tokenURL) +} + // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. +// For the client credentials 2-legged OAuth2 flow, see the clientcredentials +// package (https://golang.org/x/oauth2/clientcredentials). type Config struct { // ClientID is the application's ID. ClientID string @@ -283,7 +299,7 @@ func NewClient(ctx context.Context, src TokenSource) *http.Client { if src == nil { c, err := internal.ContextClient(ctx) if err != nil { - return &http.Client{Transport: internal.ErrorTransport{err}} + return &http.Client{Transport: internal.ErrorTransport{Err: err}} } return c } diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index ebbdddbdceb4..7a3167f15b04 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -7,6 +7,7 @@ package oauth2 import ( "net/http" "net/url" + "strconv" "strings" "time" @@ -92,14 +93,28 @@ func (t *Token) WithExtra(extra interface{}) *Token { // Extra fields are key-value pairs returned by the server as a // part of the token retrieval response. func (t *Token) Extra(key string) interface{} { - if vals, ok := t.raw.(url.Values); ok { - // TODO(jbd): Cast numeric values to int64 or float64. - return vals.Get(key) - } if raw, ok := t.raw.(map[string]interface{}); ok { return raw[key] } - return nil + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v } // expired reports whether the token is expired. diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index 90db088332b4..92ac7e2531f4 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -1,4 +1,4 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. +// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
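Two behavioral changes in this oauth2 bump are easy to miss in the noise: the jws package gained a KeyID ("kid") header field and a Verify helper while switching to unpadded base64url encoding, and Token.Extra now coerces numeric-looking values that arrived as url.Values into int64 or float64 rather than always returning strings. A short sketch exercising both, with a throwaway RSA key and made-up issuer, audience, and extra values:

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "fmt"
        "log"
        "net/url"

        "golang.org/x/oauth2"
        "golang.org/x/oauth2/jws"
    )

    func main() {
        // jws: sign a claim set (Iat/Exp are filled in automatically by encode) and
        // check it with the new Verify. KeyID populates the optional "kid" header.
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            log.Fatal(err)
        }
        token, err := jws.Encode(
            &jws.Header{Algorithm: "RS256", Typ: "JWT", KeyID: "demo-key-1"},
            &jws.ClaimSet{Iss: "svc@example.invalid", Aud: "https://example.invalid/", Scope: "email"},
            key,
        )
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("signature valid:", jws.Verify(token, &key.PublicKey) == nil)

        // Token.Extra: values backed by url.Values are now parsed as numbers when
        // they look numeric, otherwise returned unchanged.
        tok := (&oauth2.Token{AccessToken: "x"}).WithExtra(url.Values{
            "expires_in": {"3600"},
            "confidence": {"0.75"},
            "scope":      {"email profile"},
        })
        fmt.Printf("%T %T %T\n", tok.Extra("expires_in"), tok.Extra("confidence"), tok.Extra("scope"))
        // int64 float64 string
    }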
diff --git a/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go b/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go index 30f1b6bef26f..e98adbe38c68 100644 --- a/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go +++ b/vendor/google.golang.org/api/cloudmonitoring/v2beta2/cloudmonitoring-gen.go @@ -7,7 +7,7 @@ // import "google.golang.org/api/cloudmonitoring/v2beta2" // ... // cloudmonitoringService, err := cloudmonitoring.New(oauthHttpClient) -package cloudmonitoring +package cloudmonitoring // import "google.golang.org/api/cloudmonitoring/v2beta2" import ( "bytes" @@ -771,25 +771,23 @@ func (c *MetricDescriptorsCreateCall) Context(ctx context.Context) *MetricDescri } func (c *MetricDescriptorsCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.metricdescriptor) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/metricDescriptors") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudmonitoring.metricDescriptors.create" call. @@ -824,7 +822,8 @@ func (c *MetricDescriptorsCreateCall) Do(opts ...googleapi.CallOption) (*MetricD HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -893,20 +892,19 @@ func (c *MetricDescriptorsDeleteCall) Context(ctx context.Context) *MetricDescri } func (c *MetricDescriptorsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/metricDescriptors/{metric}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "metric": c.metric, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudmonitoring.metricDescriptors.delete" call. 
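In the regenerated cloudmonitoring client, every doRequest now collects its headers into a reqHeaders value, assigns it to req.Header once, and finishes with gensupport.SendRequest(c.ctx_, c.s.client, req) instead of the context branch it used to inline. gensupport itself is not part of this diff, so the following is only a plausible reading of what that helper centralizes, sketched as a standalone function under that assumption:

    package main

    import (
        "net/http"

        "golang.org/x/net/context"
        "golang.org/x/net/context/ctxhttp"
    )

    // sendRequest approximates the tail each generated call used to carry: honor the
    // per-call context when one was set via Context(), otherwise use the plain client.
    func sendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
        if ctx != nil {
            return ctxhttp.Do(ctx, client, req)
        }
        return client.Do(req)
    }

    func main() {
        req, _ := http.NewRequest("GET", "https://www.example.com/", nil)
        if resp, err := sendRequest(context.Background(), http.DefaultClient, req); err == nil {
            resp.Body.Close()
        }
    }

If the helper behaves roughly like this, each call's observable behavior is unchanged; the regeneration only removes the duplicated branch and the scattered req.Header.Set lines.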
@@ -941,7 +939,8 @@ func (c *MetricDescriptorsDeleteCall) Do(opts ...googleapi.CallOption) (*DeleteM HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1057,22 +1056,21 @@ func (c *MetricDescriptorsListCall) Context(ctx context.Context) *MetricDescript } func (c *MetricDescriptorsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/metricDescriptors") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudmonitoring.metricDescriptors.list" call. @@ -1107,7 +1105,8 @@ func (c *MetricDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListMetri HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1321,23 +1320,22 @@ func (c *TimeseriesListCall) Context(ctx context.Context) *TimeseriesListCall { } func (c *TimeseriesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/timeseries/{metric}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "metric": c.metric, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudmonitoring.timeseries.list" call. 
@@ -1372,7 +1370,8 @@ func (c *TimeseriesListCall) Do(opts ...googleapi.CallOption) (*ListTimeseriesRe HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1538,25 +1537,23 @@ func (c *TimeseriesWriteCall) Context(ctx context.Context) *TimeseriesWriteCall } func (c *TimeseriesWriteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.writetimeseriesrequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/timeseries:write") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudmonitoring.timeseries.write" call. @@ -1591,7 +1588,8 @@ func (c *TimeseriesWriteCall) Do(opts ...googleapi.CallOption) (*WriteTimeseries HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1766,23 +1764,22 @@ func (c *TimeseriesDescriptorsListCall) Context(ctx context.Context) *Timeseries } func (c *TimeseriesDescriptorsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/timeseriesDescriptors/{metric}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "metric": c.metric, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudmonitoring.timeseriesDescriptors.list" call. 
@@ -1818,7 +1815,8 @@ func (c *TimeseriesDescriptorsListCall) Do(opts ...googleapi.CallOption) (*ListT HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index b9e0b1ac50cd..1265f0425df4 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"jQLIOHBVnDZie4rQHGH1WJF-INE/-kKJM_jdN_4N4POlnVybNFH0Kag\"", + "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/IHPQejAPoHbj6rriHVm8lNKt_bg\"", "discoveryVersion": "v1", "id": "compute:v1", "name": "compute", "version": "v1", - "revision": "20160426", + "revision": "20160812", "title": "Compute Engine API", "description": "Creates and runs virtual machines on Google Cloud Platform.", "ownerDomain": "google.com", @@ -94,7 +94,7 @@ "AccessConfig": { "id": "AccessConfig", "type": "object", - "description": "An access configuration attached to an instance's network interface.", + "description": "An access configuration attached to an instance's network interface. Only one access config per instance is supported.", "properties": { "kind": { "type": "string", @@ -272,6 +272,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -300,6 +301,7 @@ "", "", "", + "", "" ] }, @@ -345,6 +347,10 @@ "type": "string", "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks." }, + "diskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts or decrypts a disk using a customer-supplied encryption key.\n\nIf you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key.\n\nIf you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance.\n\nIf you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group." + }, "index": { "type": "integer", "description": "Assigns a zero-based index to this disk, where 0 is reserved for the boot disk. For example, if you have many disks attached to an instance, each disk would have a unique index number. 
If not specified, the server will choose an appropriate value.", @@ -392,7 +398,7 @@ }, "source": { "type": "string", - "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. This field is only applicable for persistent disks." + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. This field is only applicable for persistent disks. Note that for InstanceTemplate, it is just disk name, not URL for the disk." }, "type": { "type": "string", @@ -429,11 +435,15 @@ }, "diskType": { "type": "string", - "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example:\n\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/pd-standard \n\nOther values include pd-ssd and local-ssd. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType \n- projects/project/zones/zone/diskTypes/diskType \n- zones/zone/diskTypes/diskType" + "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example:\n\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/pd-standard \n\nOther values include pd-ssd and local-ssd. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType \n- projects/project/zones/zone/diskTypes/diskType \n- zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL." }, "sourceImage": { "type": "string", "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family" + }, + "sourceImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys." } } }, @@ -444,7 +454,7 @@ "properties": { "autoscalingPolicy": { "$ref": "AutoscalingPolicy", - "description": "The configuration parameters for the autoscaling algorithm. 
You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization.\n\nIf none of these are specified, the default will be to autoscale based on cpuUtilization to 0.8 or 80%." + "description": "The configuration parameters for the autoscaling algorithm. You can define one or more of the policies for an autoscaler: cpuUtilization, customMetricUtilizations, and loadBalancingUtilization.\n\nIf none of these are specified, the default will be to autoscale based on cpuUtilization to 0.6 or 60%." }, "creationTimestamp": { "type": "string", @@ -484,7 +494,7 @@ }, "zone": { "type": "string", - "description": "[Output Only] URL of the zone where the instance group resides." + "description": "[Output Only] URL of the zone where the instance group resides (for autoscalers living in zonal scope)." } } }, @@ -572,6 +582,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -600,6 +611,7 @@ "", "", "", + "", "" ] }, @@ -672,7 +684,7 @@ "properties": { "utilizationTarget": { "type": "number", - "description": "The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.8.\n\nIf the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization.\n\nIf the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.", + "description": "The target CPU utilization that the autoscaler should maintain. Must be a float value in the range (0, 1]. If not specified, the default is 0.6.\n\nIf the CPU level is below the target utilization, the autoscaler scales down the number of instances until it reaches the minimum number of instances you specified or until the average CPU of your instances reaches the target utilization.\n\nIf the average CPU is above the target utilization, the autoscaler scales up until it reaches the maximum number of instances you specified or until the average utilization reaches the target utilization.", "format": "double" } } @@ -726,7 +738,7 @@ "properties": { "balancingMode": { "type": "string", - "description": "Specifies the balancing mode for this backend. For global HTTP(S) load balancing, the default is UTILIZATION. Valid values are UTILIZATION and RATE.", + "description": "Specifies the balancing mode for this backend. For global HTTP(S) load balancing, the default is UTILIZATION. Valid values are UTILIZATION and RATE.\n\nThis cannot be used for internal load balancing.", "enum": [ "RATE", "UTILIZATION" @@ -738,7 +750,7 @@ }, "capacityScaler": { "type": "number", - "description": "A multiplier applied to the group's maximum servicing capacity (either UTILIZATION or RATE). Default value is 1, which means the group will serve up to 100% of its configured CPU or RPS (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available CPU or RPS. Valid range is [0.0,1.0].", + "description": "A multiplier applied to the group's maximum servicing capacity (either UTILIZATION or RATE). 
Default value is 1, which means the group will serve up to 100% of its configured CPU or RPS (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available CPU or RPS. Valid range is [0.0,1.0].\n\nThis cannot be used for internal load balancing.", "format": "float" }, "description": { @@ -747,21 +759,21 @@ }, "group": { "type": "string", - "description": "The fully-qualified URL of a zonal Instance Group resource. This instance group defines the list of instances that serve traffic. Member virtual machine instances from each instance group must live in the same zone as the instance group itself. No two backends in a backend service are allowed to use same Instance Group resource.\n\nNote that you must specify an Instance Group resource using the fully-qualified URL, rather than a partial URL." + "description": "The fully-qualified URL of a zonal Instance Group resource. This instance group defines the list of instances that serve traffic. Member virtual machine instances from each instance group must live in the same zone as the instance group itself. No two backends in a backend service are allowed to use same Instance Group resource.\n\nNote that you must specify an Instance Group resource using the fully-qualified URL, rather than a partial URL.\n\nWhen the BackendService has load balancing scheme INTERNAL, the instance group must be in a zone within the same region as the BackendService." }, "maxRate": { "type": "integer", - "description": "The max requests per second (RPS) of the group. Can be used with either RATE or UTILIZATION balancing modes, but required if RATE mode. For RATE mode, either maxRate or maxRatePerInstance must be set.", + "description": "The max requests per second (RPS) of the group. Can be used with either RATE or UTILIZATION balancing modes, but required if RATE mode. For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", "format": "int32" }, "maxRatePerInstance": { "type": "number", - "description": "The max requests per second (RPS) that a single backend instance can handle.This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set.", + "description": "The max requests per second (RPS) that a single backend instance can handle.This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", "format": "float" }, "maxUtilization": { "type": "number", - "description": "Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization target for the group. The default is 0.8. Valid range is [0.0, 1.0].", + "description": "Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization target for the group. The default is 0.8. Valid range is [0.0, 1.0].\n\nThis cannot be used for internal load balancing.", "format": "float" } } @@ -771,6 +783,11 @@ "type": "object", "description": "A BackendService resource. This resource defines a group of backend virtual machines and their serving capacity.", "properties": { + "affinityCookieTtlSec": { + "type": "integer", + "description": "Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). 
The maximum allowed value for TTL is one day.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", + "format": "int32" + }, "backends": { "type": "array", "description": "The list of backends that serve this BackendService.", @@ -786,6 +803,10 @@ "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." }, + "enableCDN": { + "type": "boolean", + "description": "If true, enable Cloud CDN for this BackendService.\n\nWhen the load balancing scheme is INTERNAL, this field is not used." + }, "fingerprint": { "type": "string", "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService.", @@ -793,7 +814,7 @@ }, "healthChecks": { "type": "array", - "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required.", + "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required.\n\nFor internal load balancing, a URL to a HealthCheck resource must be specified instead.", "items": { "type": "string" } @@ -815,16 +836,16 @@ }, "port": { "type": "integer", - "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.", + "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.\n\nThis cannot be used for internal load balancing.", "format": "int32" }, "portName": { "type": "string", - "description": "Name of backend port. The same name should appear in the instance groups referenced by this service. Required." + "description": "Name of backend port. The same name should appear in the instance groups referenced by this service. Required when the load balancing scheme is EXTERNAL.\n\nWhen the load balancing scheme is INTERNAL, this field is not used." }, "protocol": { "type": "string", - "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, HTTP2, TCP and SSL.", + "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, HTTP2, TCP and SSL. The default is HTTP.\n\nFor internal load balancing, the possible values are TCP and UDP, and the default is TCP.", "enum": [ "HTTP", "HTTPS" @@ -842,6 +863,22 @@ "type": "string", "description": "[Output Only] Server-defined URL for the resource." }, + "sessionAffinity": { + "type": "string", + "description": "Type of session affinity to use. The default is NONE.\n\nWhen the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, or GENERATED_COOKIE.\n\nWhen the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the protocol is UDP, this field is not used.", + "enum": [ + "CLIENT_IP", + "CLIENT_IP_PROTO", + "GENERATED_COOKIE", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, "timeoutSec": { "type": "integer", "description": "How many seconds to wait for the backend before considering it a failed request. 
Default is 30 seconds.", @@ -873,7 +910,7 @@ "properties": { "id": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + "description": "[Output Only] Unique identifier for the resource; defined by the server." }, "items": { "type": "array", @@ -889,7 +926,7 @@ }, "nextPageToken": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "[Output Only] A token used to continue a truncated list request." }, "selfLink": { "type": "string", @@ -897,6 +934,44 @@ } } }, + "CacheInvalidationRule": { + "id": "CacheInvalidationRule", + "type": "object", + "properties": { + "path": { + "type": "string" + } + } + }, + "CustomerEncryptionKey": { + "id": "CustomerEncryptionKey", + "type": "object", + "description": "Represents a customer-supplied encryption key", + "properties": { + "rawKey": { + "type": "string", + "description": "Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource." + }, + "sha256": { + "type": "string", + "description": "[Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource." + } + } + }, + "CustomerEncryptionKeyProtectedDisk": { + "id": "CustomerEncryptionKeyProtectedDisk", + "type": "object", + "properties": { + "diskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Decrypts data associated with the disk with a customer-supplied encryption key." + }, + "source": { + "type": "string", + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. This field is only applicable for persistent disks." + } + } + }, "DeprecationStatus": { "id": "DeprecationStatus", "type": "object", @@ -947,6 +1022,10 @@ "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." }, + "diskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the disk using a customer-supplied encryption key.\n\nAfter you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later (e.g. to create a disk snapshot or an image, or to attach the disk to a virtual machine).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later." + }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -967,7 +1046,7 @@ }, "licenses": { "type": "array", - "description": "[Output Only] Any applicable publicly visible licenses.", + "description": "Any applicable publicly visible licenses.", "items": { "type": "string" } @@ -999,6 +1078,10 @@ "type": "string", "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. 
For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family" }, + "sourceImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key." + }, "sourceImageId": { "type": "string", "description": "[Output Only] The ID value of the image used to create this disk. This value identifies the exact image that was used to create this persistent disk. For example, if you created the persistent disk from an image that was later deleted and recreated under the same name, the source image ID would identify the exact version of the image that was used." @@ -1007,6 +1090,10 @@ "type": "string", "description": "The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot \n- projects/project/global/snapshots/snapshot \n- global/snapshots/snapshot" }, + "sourceSnapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key." + }, "sourceSnapshotId": { "type": "string", "description": "[Output Only] The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used." @@ -1255,6 +1342,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -1283,6 +1371,7 @@ "", "", "", + "", "" ] }, @@ -1344,6 +1433,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -1372,6 +1462,7 @@ "", "", "", + "", "" ] }, @@ -1407,17 +1498,17 @@ "properties": { "allowed": { "type": "array", - "description": "The list of rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", + "description": "The list of ALLOW rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", "items": { "type": "object", "properties": { "IPProtocol": { "type": "string", - "description": "The IP protocol that is allowed for this rule. The protocol type is required when creating a firewall rule. 
This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP protocol number." + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP protocol number." }, "ports": { "type": "array", - "description": "An optional list of ports which are allowed. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, connections through any port are allowed\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", "items": { "type": "string" } @@ -1464,14 +1555,14 @@ }, "sourceRanges": { "type": "array", - "description": "The IP address blocks that this rule applies to, expressed in CIDR format. One or both of sourceRanges and sourceTags may be set.\n\nIf both properties are set, an inbound connection is allowed if the range matches the sourceRanges OR the tag of the source matches the sourceTags property. The connection does not need to match both properties.", + "description": "If source ranges are specified, the firewall will apply only to traffic that has source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.", "items": { "type": "string" } }, "sourceTags": { "type": "array", - "description": "A list of instance tags which this rule applies to. One or both of sourceRanges and sourceTags may be set.\n\nIf both properties are set, an inbound connection is allowed if the range matches the sourceRanges OR the tag of the source matches the sourceTags property. The connection does not need to match both properties.", + "description": "If source tags are specified, the firewall will apply only to traffic with source IP that belongs to a tag listed in source tags. Source tags cannot be used to control traffic to an instance's external IP address. Because tags are associated with an instance, not an IP address. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.", "items": { "type": "string" } @@ -1523,11 +1614,11 @@ "properties": { "IPAddress": { "type": "string", - "description": "Value of the reserved IP address that this forwarding rule is serving on behalf of. For global forwarding rules, the address must be a global IP; for regional forwarding rules, the address must live in the same region as the forwarding rule. If left empty (default value), an ephemeral IP from the same scope (global or regional) will be assigned." 
+ "description": "The IP address that this forwarding rule is serving on behalf of.\n\nFor global forwarding rules, the address must be a global IP; for regional forwarding rules, the address must live in the same region as the forwarding rule. By default, this field is empty and an ephemeral IP from the same scope (global or regional) will be assigned.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnetwork configured for the forwarding rule. A reserved address cannot be used. If the field is empty, the IP address will be automatically allocated from the internal IP range of the subnetwork or network configured for this forwarding rule." }, "IPProtocol": { "type": "string", - "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.", + "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.\n\nWhen the load balancing scheme is INTERNAL\u003c/code, only TCP and UDP are valid.", "enum": [ "AH", "ESP", @@ -1568,7 +1659,7 @@ }, "portRange": { "type": "string", - "description": "Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges." + "description": "Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.\n\nThis field is not used for internal load balancing." }, "region": { "type": "string", @@ -1580,7 +1671,7 @@ }, "target": { "type": "string", - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global TargetHttpProxy or TargetHttpsProxy resource. The forwarded traffic must be of a type appropriate to the target object. For example, TargetHttpProxy requires HTTP traffic, and TargetHttpsProxy requires HTTPS traffic." + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global TargetHttpProxy or TargetHttpsProxy resource. The forwarded traffic must be of a type appropriate to the target object. For example, TargetHttpProxy requires HTTP traffic, and TargetHttpsProxy requires HTTPS traffic.\n\nThis field is not used for internal load balancing." } } }, @@ -1668,6 +1759,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -1696,6 +1788,7 @@ "", "", "", + "", "" ] }, @@ -2012,13 +2105,17 @@ }, "family": { "type": "string", - "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. The image family always returns its latest image that is not deprecated." + "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. 
The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035." }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64" }, + "imageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the image using a customer-supplied encryption key.\n\nAfter you encrypt an image with a customer-supplied key, you must provide the same key if you use the image later (e.g. to create a disk from the image).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the image, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the image later." + }, "kind": { "type": "string", "description": "[Output Only] Type of the resource. Always compute#image for images.", @@ -2026,7 +2123,7 @@ }, "licenses": { "type": "array", - "description": "Any applicable publicly visible licenses.", + "description": "Any applicable license URI.", "items": { "type": "string" } @@ -2079,6 +2176,10 @@ "type": "string", "description": "URL of the The source disk used to create this image. This can be a full or valid partial URL. You must provide either this property or the rawDisk.source property but not both to create an image. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disk/disk \n- projects/project/zones/zone/disk/disk \n- zones/zone/disks/disk" }, + "sourceDiskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." + }, "sourceDiskId": { "type": "string", "description": "The ID value of the disk used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given disk name." @@ -2198,7 +2299,7 @@ }, "networkInterfaces": { "type": "array", - "description": "An array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet.", + "description": "An array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Only one interface is supported per instance.", "items": { "$ref": "NetworkInterface" } @@ -2213,14 +2314,14 @@ }, "serviceAccounts": { "type": "array", - "description": "A list of service accounts, with their specified scopes, authorized for this instance. Service accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. See Authenticating from Google Compute Engine for more information.", + "description": "A list of service accounts, with their specified scopes, authorized for this instance. Service accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. See Service Accounts for more information.", "items": { "$ref": "ServiceAccount" } }, "status": { "type": "string", - "description": "[Output Only] The status of the instance. 
One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, and TERMINATED.", + "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDED, SUSPENDING, and TERMINATED.", "enum": [ "PROVISIONING", "RUNNING", @@ -2350,7 +2451,7 @@ }, "zone": { "type": "string", - "description": "[Output Only] The URL of the zone where the instance group is located." + "description": "[Output Only] The URL of the zone where the instance group is located (for zonal resources)." } } }, @@ -2419,6 +2520,7 @@ "InstanceGroupManager": { "id": "InstanceGroupManager", "type": "object", + "description": "An Instance Group Manager resource.", "properties": { "baseInstanceName": { "type": "string", @@ -2505,7 +2607,7 @@ }, "zone": { "type": "string", - "description": "The name of the zone where the managed instance group is located." + "description": "[Output Only] The URL of the zone where the managed instance group is located (for zonal resources)." } } }, @@ -2520,7 +2622,12 @@ }, "creating": { "type": "integer", - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be created or are currently being created. If the group fails to create one of these instances, it tries again until it creates the instance successfully.", + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be created or are currently being created. If the group fails to create any of these instances, it tries again until it creates the instance successfully.\n\nIf you have disabled creation retries, this field will not be populated; instead, the creatingWithoutRetries field will be populated.", + "format": "int32" + }, + "creatingWithoutRetries": { + "type": "integer", + "description": "[Output Only] The number of instances that the managed instance group will attempt to create. The group attempts to create each instance only once. If the group fails to create any of these instances, it decreases the group's target_size value accordingly.", "format": "int32" }, "deleting": { @@ -2686,6 +2793,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -2714,6 +2822,7 @@ "", "", "", + "", "" ] }, @@ -2789,7 +2898,7 @@ "properties": { "id": { "type": "string", - "description": "[Output Only] A unique identifier for this list of instance groups. The server generates this identifier." + "description": "[Output Only] A unique identifier for this list of instances in the specified instance group. The server generates this identifier." }, "items": { "type": "array", @@ -2800,7 +2909,7 @@ }, "kind": { "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceGroupsListInstances for lists of instance groups.", + "description": "[Output Only] The resource type, which is always compute#instanceGroupsListInstances for the list of instances in the specified instance group.", "default": "compute#instanceGroupsListInstances" }, "nextPageToken": { @@ -2809,7 +2918,7 @@ }, "selfLink": { "type": "string", - "description": "[Output Only] The URL for this list of instance groups. The server generates this URL." + "description": "[Output Only] The URL for this list of instances in the specified instance groups. The server generates this URL." 
} } }, @@ -2866,6 +2975,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -2894,6 +3004,7 @@ "", "", "", + "", "" ] }, @@ -2992,7 +3103,7 @@ "properties": { "canIpForward": { "type": "boolean", - "description": "Enables instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the canIpForward documentation for more information." + "description": "Enables instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding for instances documentation for more information." }, "description": { "type": "string", @@ -3189,6 +3300,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -3217,6 +3329,7 @@ "", "", "", + "", "" ] }, @@ -3255,6 +3368,19 @@ } } }, + "InstancesStartWithEncryptionKeyRequest": { + "id": "InstancesStartWithEncryptionKeyRequest", + "type": "object", + "properties": { + "disks": { + "type": "array", + "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key.\n\nIn order to start the instance, the disk url and its corresponding key must be provided.\n\nIf the disk is not protected with a customer-supplied encryption key it should not be specified.", + "items": { + "$ref": "CustomerEncryptionKeyProtectedDisk" + } + } + } + }, "License": { "id": "License", "type": "object", @@ -3317,6 +3443,10 @@ "description": "[Deprecated] This property is deprecated and will never be populated with any relevant values.", "format": "int32" }, + "isSharedCpu": { + "type": "boolean", + "description": "[Output Only] Whether this machine type has a shared CPU. See Shared-core machine types for more information." + }, "kind": { "type": "string", "description": "[Output Only] The type of the resource. Always compute#machineType for machine types.", @@ -3450,6 +3580,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -3478,6 +3609,7 @@ "", "", "", + "", "" ] }, @@ -3512,10 +3644,11 @@ "properties": { "currentAction": { "type": "string", - "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: \n- NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. \n- CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. \n- CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. 
If the group fails to create this instance, it does not try again and the group's target_size value is decreased. \n- RECREATING The managed instance group is recreating this instance. \n- DELETING The managed instance group is permanently deleting this instance. \n- ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. \n- RESTARTING The managed instance group is restarting the instance. \n- REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance.", + "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: \n- NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. \n- CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. \n- CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's targetSize value is decreased instead. \n- RECREATING The managed instance group is recreating this instance. \n- DELETING The managed instance group is permanently deleting this instance. \n- ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. \n- RESTARTING The managed instance group is restarting the instance. \n- REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance.", "enum": [ "ABANDONING", "CREATING", + "CREATING_WITHOUT_RETRIES", "DELETING", "NONE", "RECREATING", @@ -3529,6 +3662,7 @@ "", "", "", + "", "" ] }, @@ -3734,7 +3868,7 @@ "properties": { "accessConfigs": { "type": "array", - "description": "An array of configurations for this interface. Currently, ONE_TO_ONE_NAT is the only access config supported. If there are no accessConfigs specified, then this instance will have no external internet access.", + "description": "An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access.", "items": { "$ref": "AccessConfig" } @@ -3754,7 +3888,7 @@ }, "networkIP": { "type": "string", - "description": "An IPV4 internal network address to assign to the instance for this network interface. If not specified by user an unused internal IP is assigned by system." + "description": "An IPv4 internal network address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system." 
}, "subnetwork": { "type": "string", @@ -3933,6 +4067,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -3961,6 +4096,7 @@ "", "", "", + "", "" ] }, @@ -4078,6 +4214,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -4106,6 +4243,7 @@ "", "", "", + "", "" ] }, @@ -4191,6 +4329,10 @@ "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." }, + "defaultServiceAccount": { + "type": "string", + "description": "[Output Only] Default service account used by VMs running in this project." + }, "description": { "type": "string", "description": "An optional textual description of the resource." @@ -4262,6 +4404,7 @@ "IN_USE_ADDRESSES", "LOCAL_SSD_TOTAL_GB", "NETWORKS", + "ROUTERS", "ROUTES", "SNAPSHOTS", "SSD_TOTAL_GB", @@ -4272,6 +4415,7 @@ "TARGET_HTTP_PROXIES", "TARGET_INSTANCES", "TARGET_POOLS", + "TARGET_SSL_PROXIES", "TARGET_VPN_GATEWAYS", "URL_MAPS", "VPN_TUNNELS" @@ -4304,6 +4448,8 @@ "", "", "", + "", + "", "" ] }, @@ -4528,6 +4674,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -4556,6 +4703,7 @@ "", "", "", + "", "" ] }, @@ -4616,76 +4764,22 @@ } } }, - "Scheduling": { - "id": "Scheduling", - "type": "object", - "description": "Sets the scheduling options for an Instance.", - "properties": { - "automaticRestart": { - "type": "boolean", - "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted." - }, - "onHostMaintenance": { - "type": "string", - "description": "Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options.", - "enum": [ - "MIGRATE", - "TERMINATE" - ], - "enumDescriptions": [ - "", - "" - ] - }, - "preemptible": { - "type": "boolean", - "description": "Whether the instance is preemptible." - } - } - }, - "SerialPortOutput": { - "id": "SerialPortOutput", - "type": "object", - "description": "An instance's serial console output.", - "properties": { - "contents": { - "type": "string", - "description": "[Output Only] The contents of the console output." - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#serialPortOutput for serial port output.", - "default": "compute#serialPortOutput" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - } - } - }, - "ServiceAccount": { - "id": "ServiceAccount", + "Router": { + "id": "Router", "type": "object", - "description": "A service account.", + "description": "Router resource.", "properties": { - "email": { - "type": "string", - "description": "Email address of the service account." + "bgp": { + "$ref": "RouterBgp", + "description": "BGP information specific to this router." 
}, - "scopes": { + "bgpPeers": { "type": "array", - "description": "The list of scopes to be made available for this service account.", + "description": "BGP information that needs to be configured into the routing stack to establish the BGP peering. It must specify peer ASN and either interface name, IP, or peer IP. Please refer to RFC4273.", "items": { - "type": "string" + "$ref": "RouterBgpPeer" } - } - } - }, - "Snapshot": { - "id": "Snapshot", - "type": "object", - "description": "A persistent disk snapshot resource.", - "properties": { + }, "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." @@ -4694,102 +4788,73 @@ "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." }, - "diskSizeGb": { - "type": "string", - "description": "[Output Only] Size of the snapshot, specified in GB.", - "format": "int64" - }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64" }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#snapshot for Snapshot resources.", - "default": "compute#snapshot" - }, - "licenses": { + "interfaces": { "type": "array", - "description": "[Output Only] A list of public visible licenses that apply to this snapshot. This can be because the original image had licenses attached (such as a Windows image).", + "description": "Router interfaces. Each interface requires either one linked resource (e.g. linkedVpnTunnel) or IP address and IP address range (e.g. ipRange).", "items": { - "type": "string" + "$ref": "RouterInterface" } }, - "name": { - "type": "string", - "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "sourceDisk": { + "kind": { "type": "string", - "description": "[Output Only] The source disk used to create this snapshot." + "description": "[Output Only] Type of resource. Always compute#router for routers.", + "default": "compute#router" }, - "sourceDiskId": { + "name": { "type": "string", - "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name." + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.routers.insert" + ] + } }, - "status": { + "network": { "type": "string", - "description": "[Output Only] The status of the snapshot. This can be CREATING, DELETING, FAILED, READY, or UPLOADING.", - "enum": [ - "CREATING", - "DELETING", - "FAILED", - "READY", - "UPLOADING" - ], - "enumDescriptions": [ - "", - "", - "", - "", - "" - ] + "description": "URI of the network to which this router belongs.", + "annotations": { + "required": [ + "compute.routers.insert" + ] + } }, - "storageBytes": { + "region": { "type": "string", - "description": "[Output Only] A size of the the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", - "format": "int64" + "description": "[Output Only] URI of the region where the router resides." }, - "storageBytesStatus": { + "selfLink": { "type": "string", - "description": "[Output Only] An indicator whether storageBytes is in a stable state or it is being adjusted as a result of shared storage reallocation. This status can either be UPDATING, meaning the size of the snapshot is being updated, or UP_TO_DATE, meaning the size of the snapshot is up-to-date.", - "enum": [ - "UPDATING", - "UP_TO_DATE" - ], - "enumDescriptions": [ - "", - "" - ] + "description": "[Output Only] Server-defined URL for the resource." } } }, - "SnapshotList": { - "id": "SnapshotList", + "RouterAggregatedList": { + "id": "RouterAggregatedList", "type": "object", - "description": "Contains a list of Snapshot resources.", + "description": "Contains a list of routers.", "properties": { "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." }, "items": { - "type": "array", - "description": "[Output Only] A list of Snapshot resources.", - "items": { - "$ref": "Snapshot" + "type": "object", + "description": "A map of scoped router lists.", + "additionalProperties": { + "$ref": "RoutersScopedList", + "description": "Name of the scope containing this set of routers." } }, "kind": { "type": "string", "description": "Type of resource.", - "default": "compute#snapshotList" + "default": "compute#routerAggregatedList" }, "nextPageToken": { "type": "string", @@ -4801,68 +4866,89 @@ } } }, - "SslCertificate": { - "id": "SslCertificate", + "RouterBgp": { + "id": "RouterBgp", "type": "object", - "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user.", "properties": { - "certificate": { - "type": "string", - "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert." + "asn": { + "type": "integer", + "description": "Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. 
All VPN tunnels that link to this router will have the same local ASN.", + "format": "uint32" + } + } + }, + "RouterBgpPeer": { + "id": "RouterBgpPeer", + "type": "object", + "properties": { + "advertisedRoutePriority": { + "type": "integer", + "description": "The priority of routes advertised to this BGP peer. In the case where there is more than one matching route of maximum length, the routes with lowest priority value win.", + "format": "uint32" }, - "creationTimestamp": { + "interfaceName": { "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." + "description": "Name of the interface the BGP peer is associated with." }, - "description": { + "ipAddress": { "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." + "description": "IP address of the interface inside Google Cloud Platform." }, - "id": { + "name": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" + "description": "Name of this BGP peer. The name must be 1-63 characters long and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" }, - "kind": { - "type": "string", - "description": "[Output Only] Type of the resource. Always compute#sslCertificate for SSL certificates.", - "default": "compute#sslCertificate" + "peerAsn": { + "type": "integer", + "description": "Peer BGP Autonomous System Number (ASN). For VPN use case, this value can be different for every tunnel.", + "format": "uint32" }, - "name": { + "peerIpAddress": { "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + "description": "IP address of the BGP interface outside Google cloud." + } + } + }, + "RouterInterface": { + "id": "RouterInterface", + "type": "object", + "properties": { + "ipRange": { + "type": "string", + "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface." }, - "privateKey": { + "linkedVpnTunnel": { "type": "string", - "description": "A write-only private key in PEM format. Only insert RPCs will include this field." + "description": "URI of linked VPN tunnel. It must be in the same region as the router. Each interface can have at most one linked resource." }, - "selfLink": { + "name": { "type": "string", - "description": "[Output only] Server-defined URL for the resource." + "description": "Name of this interface entry. The name must be 1-63 characters long and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" 
} } }, - "SslCertificateList": { - "id": "SslCertificateList", + "RouterList": { + "id": "RouterList", "type": "object", - "description": "Contains a list of SslCertificate resources.", + "description": "Contains a list of Router resources.", "properties": { "id": { "type": "string", - "description": "[Output Only] Unique identifier for the resource. Defined by the server." + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." }, "items": { "type": "array", - "description": "A list of SslCertificate resources.", + "description": "A list of Router resources.", "items": { - "$ref": "SslCertificate" + "$ref": "Router" } }, "kind": { "type": "string", - "description": "Type of resource.", - "default": "compute#sslCertificateList" + "description": "[Output Only] Type of resource. Always compute#router for routers.", + "default": "compute#routerList" }, "nextPageToken": { "type": "string", @@ -4870,136 +4956,131 @@ }, "selfLink": { "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "description": "[Output Only] Server-defined URL for the resource." } } }, - "Subnetwork": { - "id": "Subnetwork", + "RouterStatus": { + "id": "RouterStatus", "type": "object", - "description": "A Subnetwork resource.", "properties": { - "creationTimestamp": { - "type": "string", - "description": "[Output Only] Creation timestamp in RFC3339 text format." + "bestRoutes": { + "type": "array", + "description": "Best routes for this router's network.", + "items": { + "$ref": "Route" + } }, - "description": { + "bgpPeerStatus": { + "type": "array", + "items": { + "$ref": "RouterStatusBgpPeerStatus" + } + }, + "network": { "type": "string", - "description": "An optional description of this resource. Provide this property when you create the resource." + "description": "URI of the network to which this router belongs." + } + } + }, + "RouterStatusBgpPeerStatus": { + "id": "RouterStatusBgpPeerStatus", + "type": "object", + "properties": { + "advertisedRoutes": { + "type": "array", + "description": "Routes that were advertised to the remote BGP peer", + "items": { + "$ref": "Route" + } }, - "gatewayAddress": { + "ipAddress": { "type": "string", - "description": "[Output Only] The gateway address for default routes to reach destination addresses outside this subnetwork." + "description": "IP address of the local BGP interface." }, - "id": { + "linkedVpnTunnel": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", - "format": "uint64" + "description": "URL of the VPN tunnel that this BGP peer controls." }, - "ipCidrRange": { + "name": { "type": "string", - "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network." + "description": "Name of this BGP peer. Unique within the Routers resource." }, - "kind": { + "numLearnedRoutes": { + "type": "integer", + "description": "Number of routes learned from the remote BGP Peer.", + "format": "uint32" + }, + "peerIpAddress": { "type": "string", - "description": "[Output Only] Type of the resource. Always compute#subnetwork for Subnetwork resources.", - "default": "compute#subnetwork" + "description": "IP address of the remote BGP interface." 
}, - "name": { + "state": { "type": "string", - "description": "The name of the resource, provided by the client when initially creating the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + "description": "BGP state as specified in RFC1771." }, - "network": { + "status": { "type": "string", - "description": "The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. Only networks that are in the distributed mode can have subnetworks." + "description": "Status of the BGP peer: {UP, DOWN}", + "enum": [ + "DOWN", + "UNKNOWN", + "UP" + ], + "enumDescriptions": [ + "", + "", + "" + ] }, - "region": { + "uptime": { "type": "string", - "description": "URL of the region where the Subnetwork resides." + "description": "Time this session has been up. Format: 14 years, 51 weeks, 6 days, 23 hours, 59 minutes, 59 seconds" }, - "selfLink": { + "uptimeSeconds": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "description": "Time this session has been up, in seconds. Format: 145" } } }, - "SubnetworkAggregatedList": { - "id": "SubnetworkAggregatedList", + "RouterStatusResponse": { + "id": "RouterStatusResponse", "type": "object", "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "object", - "description": "[Output] A map of scoped Subnetwork lists.", - "additionalProperties": { - "$ref": "SubnetworksScopedList", - "description": "Name of the scope containing this set of Subnetworks." - } - }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#subnetworkAggregatedList for aggregated lists of subnetworks.", - "default": "compute#subnetworkAggregatedList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "Type of resource.", + "default": "compute#routerStatusResponse" }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "result": { + "$ref": "RouterStatus" } } }, - "SubnetworkList": { - "id": "SubnetworkList", + "RoutersPreviewResponse": { + "id": "RoutersPreviewResponse", "type": "object", - "description": "Contains a list of Subnetwork resources.", "properties": { - "id": { - "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." - }, - "items": { - "type": "array", - "description": "The Subnetwork resources.", - "items": { - "$ref": "Subnetwork" - } - }, - "kind": { - "type": "string", - "description": "[Output Only] Type of resource. 
Always compute#subnetworkList for lists of subnetworks.", - "default": "compute#subnetworkList" - }, - "nextPageToken": { - "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." - }, - "selfLink": { - "type": "string", - "description": "[Output Only] Server-defined URL for this resource." + "resource": { + "$ref": "Router", + "description": "Preview of given router." } } }, - "SubnetworksScopedList": { - "id": "SubnetworksScopedList", + "RoutersScopedList": { + "id": "RoutersScopedList", "type": "object", "properties": { - "subnetworks": { + "routers": { "type": "array", - "description": "List of subnetworks contained in this scope.", + "description": "List of routers contained in this scope.", "items": { - "$ref": "Subnetwork" + "$ref": "Router" } }, "warning": { "type": "object", - "description": "An informational warning that appears when the list of addresses is empty.", + "description": "Informational warning which replaces the list of routers when the list is empty.", "properties": { "code": { "type": "string", @@ -5008,6 +5089,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -5036,6 +5118,7 @@ "", "", "", + "", "" ] }, @@ -5064,29 +5147,75 @@ } } }, - "Tags": { - "id": "Tags", + "Scheduling": { + "id": "Scheduling", "type": "object", - "description": "A set of instance tags.", + "description": "Sets the scheduling options for an Instance.", "properties": { - "fingerprint": { + "automaticRestart": { + "type": "boolean", + "description": "Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). You can only set the automatic restart option for standard instances. Preemptible instances cannot be automatically restarted." + }, + "onHostMaintenance": { "type": "string", - "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata.\n\nTo see the latest fingerprint, make get() request to the instance.", - "format": "byte" + "description": "Defines the maintenance behavior for this instance. For standard instances, the default behavior is MIGRATE. For preemptible instances, the default and only possible behavior is TERMINATE. For more information, see Setting Instance Scheduling Options.", + "enum": [ + "MIGRATE", + "TERMINATE" + ], + "enumDescriptions": [ + "", + "" + ] }, - "items": { + "preemptible": { + "type": "boolean", + "description": "Whether the instance is preemptible." + } + } + }, + "SerialPortOutput": { + "id": "SerialPortOutput", + "type": "object", + "description": "An instance's serial console output.", + "properties": { + "contents": { + "type": "string", + "description": "[Output Only] The contents of the console output." + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. 
Always compute#serialPortOutput for serial port output.", + "default": "compute#serialPortOutput" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "ServiceAccount": { + "id": "ServiceAccount", + "type": "object", + "description": "A service account.", + "properties": { + "email": { + "type": "string", + "description": "Email address of the service account." + }, + "scopes": { "type": "array", - "description": "An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.", + "description": "The list of scopes to be made available for this service account.", "items": { "type": "string" } } } }, - "TargetHttpProxy": { - "id": "TargetHttpProxy", + "Snapshot": { + "id": "Snapshot", "type": "object", - "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy.", + "description": "A persistent disk snapshot resource.", "properties": { "creationTimestamp": { "type": "string", @@ -5096,6 +5225,11 @@ "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." }, + "diskSizeGb": { + "type": "string", + "description": "[Output Only] Size of the snapshot, specified in GB.", + "format": "int64" + }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -5103,44 +5237,98 @@ }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetHttpProxy for target HTTP proxies.", - "default": "compute#targetHttpProxy" + "description": "[Output Only] Type of the resource. Always compute#snapshot for Snapshot resources.", + "default": "compute#snapshot" + }, + "licenses": { + "type": "array", + "description": "[Output Only] A list of public visible licenses that apply to this snapshot. This can be because the original image had licenses attached (such as a Windows image).", + "items": { + "type": "string" + } }, "name": { "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." 
}, - "urlMap": { + "snapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the image later For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." + }, + "sourceDisk": { "type": "string", - "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService." - } - } - }, - "TargetHttpProxyList": { - "id": "TargetHttpProxyList", - "type": "object", - "description": "A list of TargetHttpProxy resources.", - "properties": { - "id": { + "description": "[Output Only] The source disk used to create this snapshot." + }, + "sourceDiskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." + }, + "sourceDiskId": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name." + }, + "status": { + "type": "string", + "description": "[Output Only] The status of the snapshot. This can be CREATING, DELETING, FAILED, READY, or UPLOADING.", + "enum": [ + "CREATING", + "DELETING", + "FAILED", + "READY", + "UPLOADING" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ] + }, + "storageBytes": { + "type": "string", + "description": "[Output Only] A size of the the storage used by the snapshot. As snapshots share storage, this number is expected to change with snapshot creation/deletion.", + "format": "int64" + }, + "storageBytesStatus": { + "type": "string", + "description": "[Output Only] An indicator whether storageBytes is in a stable state or it is being adjusted as a result of shared storage reallocation. This status can either be UPDATING, meaning the size of the snapshot is being updated, or UP_TO_DATE, meaning the size of the snapshot is up-to-date.", + "enum": [ + "UPDATING", + "UP_TO_DATE" + ], + "enumDescriptions": [ + "", + "" + ] + } + } + }, + "SnapshotList": { + "id": "SnapshotList", + "type": "object", + "description": "Contains a list of Snapshot resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." }, "items": { "type": "array", - "description": "A list of TargetHttpProxy resources.", + "description": "[Output Only] A list of Snapshot resources.", "items": { - "$ref": "TargetHttpProxy" + "$ref": "Snapshot" } }, "kind": { "type": "string", - "description": "Type of resource. 
Always compute#targetHttpProxyList for lists of target HTTP proxies.", - "default": "compute#targetHttpProxyList" + "description": "Type of resource.", + "default": "compute#snapshotList" }, "nextPageToken": { "type": "string", @@ -5152,24 +5340,15 @@ } } }, - "TargetHttpsProxiesSetSslCertificatesRequest": { - "id": "TargetHttpsProxiesSetSslCertificatesRequest", - "type": "object", - "properties": { - "sslCertificates": { - "type": "array", - "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", - "items": { - "type": "string" - } - } - } - }, - "TargetHttpsProxy": { - "id": "TargetHttpsProxy", + "SslCertificate": { + "id": "SslCertificate", "type": "object", - "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy.", + "description": "An SslCertificate resource. This resource provides a mechanism to upload an SSL key and certificate to the load balancer to serve secure connections from the user.", "properties": { + "certificate": { + "type": "string", + "description": "A local certificate file. The certificate must be in PEM format. The certificate chain must be no greater than 5 certs long. The chain must include at least one intermediate cert." + }, "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." @@ -5185,51 +5364,44 @@ }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetHttpsProxy for target HTTPS proxies.", - "default": "compute#targetHttpsProxy" + "description": "[Output Only] Type of the resource. Always compute#sslCertificate for SSL certificates.", + "default": "compute#sslCertificate" }, "name": { "type": "string", "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" }, - "selfLink": { + "privateKey": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." - }, - "sslCertificates": { - "type": "array", - "description": "URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. Currently, exactly one SSL certificate must be specified.", - "items": { - "type": "string" - } + "description": "A write-only private key in PEM format. Only insert RPCs will include this field." }, - "urlMap": { + "selfLink": { "type": "string", - "description": "A fully-qualified or valid partial URL to the UrlMap resource that defines the mapping from URL to the BackendService. For example, the following are all valid URLs for specifying a URL map: \n- https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map \n- projects/project/global/urlMaps/url-map \n- global/urlMaps/url-map" + "description": "[Output only] Server-defined URL for the resource." 
} } }, - "TargetHttpsProxyList": { - "id": "TargetHttpsProxyList", + "SslCertificateList": { + "id": "SslCertificateList", "type": "object", - "description": "Contains a list of TargetHttpsProxy resources.", + "description": "Contains a list of SslCertificate resources.", "properties": { "id": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + "description": "[Output Only] Unique identifier for the resource. Defined by the server." }, "items": { "type": "array", - "description": "A list of TargetHttpsProxy resources.", + "description": "A list of SslCertificate resources.", "items": { - "$ref": "TargetHttpsProxy" + "$ref": "SslCertificate" } }, "kind": { "type": "string", - "description": "Type of resource. Always compute#targetHttpsProxyList for lists of target HTTPS proxies.", - "default": "compute#targetHttpsProxyList" + "description": "Type of resource.", + "default": "compute#sslCertificateList" }, "nextPageToken": { "type": "string", @@ -5241,10 +5413,10 @@ } } }, - "TargetInstance": { - "id": "TargetInstance", + "Subnetwork": { + "id": "Subnetwork", "type": "object", - "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols.", + "description": "A Subnetwork resource.", "properties": { "creationTimestamp": { "type": "string", @@ -5254,65 +5426,63 @@ "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." }, + "gatewayAddress": { + "type": "string", + "description": "[Output Only] The gateway address for default routes to reach destination addresses outside this subnetwork." + }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64" }, - "instance": { + "ipCidrRange": { "type": "string", - "description": "A URL to the virtual machine instance that handles traffic for this target instance. When creating a target instance, you can provide the fully-qualified URL or a valid partial URL to the desired virtual machine. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance \n- zones/zone/instances/instance" + "description": "The range of internal addresses that are owned by this subnetwork. Provide this property when you create the subnetwork. For example, 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and non-overlapping within a network." }, "kind": { "type": "string", - "description": "[Output Only] The type of the resource. Always compute#targetInstance for target instances.", - "default": "compute#targetInstance" + "description": "[Output Only] Type of the resource. Always compute#subnetwork for Subnetwork resources.", + "default": "compute#subnetwork" }, "name": { "type": "string", - "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? 
which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "description": "The name of the resource, provided by the client when initially creating the resource. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" }, - "natPolicy": { + "network": { "type": "string", - "description": "NAT option controlling how IPs are NAT'ed to the instance. Currently only NO_NAT (default value) is supported.", - "enum": [ - "NO_NAT" - ], - "enumDescriptions": [ - "" - ] + "description": "The URL of the network to which this subnetwork belongs, provided by the client when initially creating the subnetwork. Only networks that are in the distributed mode can have subnetworks." }, - "selfLink": { + "region": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "description": "URL of the region where the Subnetwork resides." }, - "zone": { + "selfLink": { "type": "string", - "description": "[Output Only] URL of the zone where the target instance resides." + "description": "[Output Only] Server-defined URL for the resource." } } }, - "TargetInstanceAggregatedList": { - "id": "TargetInstanceAggregatedList", + "SubnetworkAggregatedList": { + "id": "SubnetworkAggregatedList", "type": "object", "properties": { "id": { "type": "string", - "description": "[Output Only] Unique identifier for the resource; defined by the server." + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." }, "items": { "type": "object", - "description": "A map of scoped target instance lists.", + "description": "[Output] A map of scoped Subnetwork lists.", "additionalProperties": { - "$ref": "TargetInstancesScopedList", - "description": "Name of the scope containing this set of target instances." + "$ref": "SubnetworksScopedList", + "description": "Name of the scope containing this set of Subnetworks." } }, "kind": { "type": "string", - "description": "Type of resource.", - "default": "compute#targetInstanceAggregatedList" + "description": "[Output Only] Type of resource. Always compute#subnetworkAggregatedList for aggregated lists of subnetworks.", + "default": "compute#subnetworkAggregatedList" }, "nextPageToken": { "type": "string", @@ -5324,10 +5494,10 @@ } } }, - "TargetInstanceList": { - "id": "TargetInstanceList", + "SubnetworkList": { + "id": "SubnetworkList", "type": "object", - "description": "Contains a list of TargetInstance resources.", + "description": "Contains a list of Subnetwork resources.", "properties": { "id": { "type": "string", @@ -5335,15 +5505,15 @@ }, "items": { "type": "array", - "description": "A list of TargetInstance resources.", + "description": "The Subnetwork resources.", "items": { - "$ref": "TargetInstance" + "$ref": "Subnetwork" } }, "kind": { "type": "string", - "description": "Type of resource.", - "default": "compute#targetInstanceList" + "description": "[Output Only] Type of resource. 
Always compute#subnetworkList for lists of subnetworks.", + "default": "compute#subnetworkList" }, "nextPageToken": { "type": "string", @@ -5355,20 +5525,20 @@ } } }, - "TargetInstancesScopedList": { - "id": "TargetInstancesScopedList", + "SubnetworksScopedList": { + "id": "SubnetworksScopedList", "type": "object", "properties": { - "targetInstances": { + "subnetworks": { "type": "array", - "description": "List of target instances contained in this scope.", + "description": "List of subnetworks contained in this scope.", "items": { - "$ref": "TargetInstance" + "$ref": "Subnetwork" } }, "warning": { "type": "object", - "description": "Informational warning which replaces the list of addresses when the list is empty.", + "description": "An informational warning that appears when the list of addresses is empty.", "properties": { "code": { "type": "string", @@ -5377,6 +5547,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -5405,6 +5576,7 @@ "", "", "", + "", "" ] }, @@ -5433,15 +5605,30 @@ } } }, - "TargetPool": { - "id": "TargetPool", + "Tags": { + "id": "Tags", "type": "object", - "description": "A TargetPool resource. This resource defines a pool of instances, associated HttpHealthCheck resources, and the fallback target pool.", + "description": "A set of instance tags.", "properties": { - "backupPool": { + "fingerprint": { "type": "string", - "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its failoverRatio field is properly set to a value between [0, 1].\n\nbackupPool and failoverRatio together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below failoverRatio, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio and backupPool are not set, or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy." + "description": "Specifies a fingerprint for this request, which is essentially a hash of the metadata's contents and used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update metadata. You must always provide an up-to-date fingerprint hash in order to update or change metadata.\n\nTo see the latest fingerprint, make get() request to the instance.", + "format": "byte" }, + "items": { + "type": "array", + "description": "An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.", + "items": { + "type": "string" + } + } + } + }, + "TargetHttpProxy": { + "id": "TargetHttpProxy", + "type": "object", + "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy.", + "properties": { "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." @@ -5450,84 +5637,51 @@ "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." 
}, - "failoverRatio": { - "type": "number", - "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool (i.e., not as a backup pool to some other target pool). The value of the field must be in [0, 1].\n\nIf set, backupPool must also be set. They together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below this number, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio is not set or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy.", - "format": "float" - }, - "healthChecks": { - "type": "array", - "description": "A list of URLs to the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if all specified health checks pass. An empty list means all member instances will be considered healthy at all times.", - "items": { - "type": "string" - } - }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64" }, - "instances": { - "type": "array", - "description": "A list of resource URLs to the virtual machine instances serving this pool. They must live in zones contained in the same region as this pool.", - "items": { - "type": "string" - } - }, "kind": { "type": "string", - "description": "[Output Only] Type of the resource. Always compute#targetPool for target pools.", - "default": "compute#targetPool" + "description": "[Output Only] Type of resource. Always compute#targetHttpProxy for target HTTP proxies.", + "default": "compute#targetHttpProxy" }, "name": { "type": "string", "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" }, - "region": { - "type": "string", - "description": "[Output Only] URL of the region where the target pool resides." - }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." }, - "sessionAffinity": { + "urlMap": { "type": "string", - "description": "Sesssion affinity option, must be one of the following values:\nNONE: Connections from the same client IP may go to any instance in the pool.\nCLIENT_IP: Connections from the same client IP will go to the same instance in the pool while that instance remains healthy.\nCLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol will go to the same instance in the pool while that instance remains healthy.", - "enum": [ - "CLIENT_IP", - "CLIENT_IP_PROTO", - "NONE" - ], - "enumDescriptions": [ - "", - "", - "" - ] + "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService." 
} } }, - "TargetPoolAggregatedList": { - "id": "TargetPoolAggregatedList", + "TargetHttpProxyList": { + "id": "TargetHttpProxyList", "type": "object", + "description": "A list of TargetHttpProxy resources.", "properties": { "id": { "type": "string", - "description": "[Output Only] Unique identifier for the resource. Defined by the server." + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." }, "items": { - "type": "object", - "description": "[Output Only] A map of scoped target pool lists.", - "additionalProperties": { - "$ref": "TargetPoolsScopedList", - "description": "Name of the scope containing this set of target pools." + "type": "array", + "description": "A list of TargetHttpProxy resources.", + "items": { + "$ref": "TargetHttpProxy" } }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetPoolAggregatedList for aggregated lists of target pools.", - "default": "compute#targetPoolAggregatedList" + "description": "Type of resource. Always compute#targetHttpProxyList for lists of target HTTP proxies.", + "default": "compute#targetHttpProxyList" }, "nextPageToken": { "type": "string", @@ -5539,43 +5693,84 @@ } } }, - "TargetPoolInstanceHealth": { - "id": "TargetPoolInstanceHealth", + "TargetHttpsProxiesSetSslCertificatesRequest": { + "id": "TargetHttpsProxiesSetSslCertificatesRequest", "type": "object", "properties": { - "healthStatus": { + "sslCertificates": { "type": "array", + "description": "New set of SslCertificate resources to associate with this TargetHttpsProxy resource. Currently exactly one SslCertificate resource must be specified.", "items": { - "$ref": "HealthStatus" + "type": "string" } + } + } + }, + "TargetHttpsProxy": { + "id": "TargetHttpsProxy", + "type": "object", + "description": "A TargetHttpsProxy resource. This resource defines an HTTPS proxy.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetPoolInstanceHealth when checking the health of an instance.", - "default": "compute#targetPoolInstanceHealth" + "description": "[Output Only] Type of resource. Always compute#targetHttpsProxy for target HTTPS proxies.", + "default": "compute#targetHttpsProxy" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." 
+ }, + "sslCertificates": { + "type": "array", + "description": "URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. Currently, exactly one SSL certificate must be specified.", + "items": { + "type": "string" + } + }, + "urlMap": { + "type": "string", + "description": "A fully-qualified or valid partial URL to the UrlMap resource that defines the mapping from URL to the BackendService. For example, the following are all valid URLs for specifying a URL map: \n- https://www.googleapis.compute/v1/projects/project/global/urlMaps/url-map \n- projects/project/global/urlMaps/url-map \n- global/urlMaps/url-map" } } }, - "TargetPoolList": { - "id": "TargetPoolList", + "TargetHttpsProxyList": { + "id": "TargetHttpsProxyList", "type": "object", - "description": "Contains a list of TargetPool resources.", + "description": "Contains a list of TargetHttpsProxy resources.", "properties": { "id": { "type": "string", - "description": "[Output Only] Unique identifier for the resource. Defined by the server." + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." }, "items": { "type": "array", - "description": "A list of TargetPool resources.", + "description": "A list of TargetHttpsProxy resources.", "items": { - "$ref": "TargetPool" + "$ref": "TargetHttpsProxy" } }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetPoolList for lists of target pools.", - "default": "compute#targetPoolList" + "description": "Type of resource. Always compute#targetHttpsProxyList for lists of target HTTPS proxies.", + "default": "compute#targetHttpsProxyList" }, "nextPageToken": { "type": "string", @@ -5587,67 +5782,129 @@ } } }, - "TargetPoolsAddHealthCheckRequest": { - "id": "TargetPoolsAddHealthCheckRequest", - "type": "object", - "properties": { - "healthChecks": { - "type": "array", - "description": "A list of HttpHealthCheck resources to add to the target pool.", - "items": { - "$ref": "HealthCheckReference" - } - } - } - }, - "TargetPoolsAddInstanceRequest": { - "id": "TargetPoolsAddInstanceRequest", + "TargetInstance": { + "id": "TargetInstance", "type": "object", + "description": "A TargetInstance resource. This resource defines an endpoint instance that terminates traffic of certain protocols.", "properties": { - "instances": { - "type": "array", - "description": "A full or partial URL to an instance to add to this target pool. This can be a full or partial URL. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name \n- projects/project-id/zones/zone/instances/instance-name \n- zones/zone/instances/instance-name", - "items": { - "$ref": "InstanceReference" - } + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "instance": { + "type": "string", + "description": "A URL to the virtual machine instance that handles traffic for this target instance. 
When creating a target instance, you can provide the fully-qualified URL or a valid partial URL to the desired virtual machine. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance \n- projects/project/zones/zone/instances/instance \n- zones/zone/instances/instance" + }, + "kind": { + "type": "string", + "description": "[Output Only] The type of the resource. Always compute#targetInstance for target instances.", + "default": "compute#targetInstance" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "natPolicy": { + "type": "string", + "description": "NAT option controlling how IPs are NAT'ed to the instance. Currently only NO_NAT (default value) is supported.", + "enum": [ + "NO_NAT" + ], + "enumDescriptions": [ + "" + ] + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "zone": { + "type": "string", + "description": "[Output Only] URL of the zone where the target instance resides." } } }, - "TargetPoolsRemoveHealthCheckRequest": { - "id": "TargetPoolsRemoveHealthCheckRequest", + "TargetInstanceAggregatedList": { + "id": "TargetInstanceAggregatedList", "type": "object", "properties": { - "healthChecks": { - "type": "array", - "description": "Health check URL to be removed. This can be a full or valid partial URL. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check \n- projects/project/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", - "items": { - "$ref": "HealthCheckReference" + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource; defined by the server." + }, + "items": { + "type": "object", + "description": "A map of scoped target instance lists.", + "additionalProperties": { + "$ref": "TargetInstancesScopedList", + "description": "Name of the scope containing this set of target instances." } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetInstanceAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." } } }, - "TargetPoolsRemoveInstanceRequest": { - "id": "TargetPoolsRemoveInstanceRequest", + "TargetInstanceList": { + "id": "TargetInstanceList", "type": "object", + "description": "Contains a list of TargetInstance resources.", "properties": { - "instances": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. 
This identifier is defined by the server." + }, + "items": { "type": "array", - "description": "URLs of the instances to be removed from target pool.", + "description": "A list of TargetInstance resources.", "items": { - "$ref": "InstanceReference" + "$ref": "TargetInstance" } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#targetInstanceList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." } } }, - "TargetPoolsScopedList": { - "id": "TargetPoolsScopedList", + "TargetInstancesScopedList": { + "id": "TargetInstancesScopedList", "type": "object", "properties": { - "targetPools": { + "targetInstances": { "type": "array", - "description": "List of target pools contained in this scope.", + "description": "List of target instances contained in this scope.", "items": { - "$ref": "TargetPool" + "$ref": "TargetInstance" } }, "warning": { @@ -5661,6 +5918,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -5689,6 +5947,7 @@ "", "", "", + "", "" ] }, @@ -5717,20 +5976,15 @@ } } }, - "TargetReference": { - "id": "TargetReference", - "type": "object", - "properties": { - "target": { - "type": "string" - } - } - }, - "TargetVpnGateway": { - "id": "TargetVpnGateway", + "TargetPool": { + "id": "TargetPool", "type": "object", - "description": "Represents a Target VPN gateway resource.", + "description": "A TargetPool resource. This resource defines a pool of instances, associated HttpHealthCheck resources, and the fallback target pool.", "properties": { + "backupPool": { + "type": "string", + "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its failoverRatio field is properly set to a value between [0, 1].\n\nbackupPool and failoverRatio together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below failoverRatio, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio and backupPool are not set, or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy." + }, "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." @@ -5739,58 +5993,56 @@ "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." }, - "forwardingRules": { - "type": "array", - "description": "[Output Only] A list of URLs to the ForwardingRule resources. 
ForwardingRules are created using compute.forwardingRules.insert and associated to a VPN gateway.", - "items": { - "type": "string" - } + "failoverRatio": { + "type": "number", + "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool (i.e., not as a backup pool to some other target pool). The value of the field must be in [0, 1].\n\nIf set, backupPool must also be set. They together define the fallback behavior of the primary target pool: if the ratio of the healthy instances in the primary pool is at or below this number, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where failoverRatio is not set or all the instances in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy instances with the best effort, or to all instances when no instance is healthy.", + "format": "float" + }, + "healthChecks": { + "type": "array", + "description": "A list of URLs to the HttpHealthCheck resource. A member instance in this pool is considered healthy if and only if all specified health checks pass. An empty list means all member instances will be considered healthy at all times.", + "items": { + "type": "string" + } }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64" }, + "instances": { + "type": "array", + "description": "A list of resource URLs to the virtual machine instances serving this pool. They must live in zones contained in the same region as this pool.", + "items": { + "type": "string" + } + }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", - "default": "compute#targetVpnGateway" + "description": "[Output Only] Type of the resource. Always compute#targetPool for target pools.", + "default": "compute#targetPool" }, "name": { "type": "string", "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "annotations": { - "required": [ - "compute.targetVpnGateways.insert" - ] - } - }, - "network": { - "type": "string", - "description": "URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.", - "annotations": { - "required": [ - "compute.targetVpnGateways.insert" - ] - } + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" }, "region": { "type": "string", - "description": "[Output Only] URL of the region where the target VPN gateway resides." + "description": "[Output Only] URL of the region where the target pool resides." }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." 
}, - "status": { + "sessionAffinity": { "type": "string", - "description": "[Output Only] The status of the VPN gateway.", + "description": "Sesssion affinity option, must be one of the following values:\nNONE: Connections from the same client IP may go to any instance in the pool.\nCLIENT_IP: Connections from the same client IP will go to the same instance in the pool while that instance remains healthy.\nCLIENT_IP_PROTO: Connections from the same client IP with the same IP protocol will go to the same instance in the pool while that instance remains healthy.", "enum": [ - "CREATING", - "DELETING", - "FAILED", - "READY" + "CLIENT_IP", + "CLIENT_IP_PROTO", + "GENERATED_COOKIE", + "NONE" ], "enumDescriptions": [ "", @@ -5798,36 +6050,29 @@ "", "" ] - }, - "tunnels": { - "type": "array", - "description": "[Output Only] A list of URLs to VpnTunnel resources. VpnTunnels are created using compute.vpntunnels.insert method and associated to a VPN gateway.", - "items": { - "type": "string" - } } } }, - "TargetVpnGatewayAggregatedList": { - "id": "TargetVpnGatewayAggregatedList", + "TargetPoolAggregatedList": { + "id": "TargetPoolAggregatedList", "type": "object", "properties": { "id": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + "description": "[Output Only] Unique identifier for the resource. Defined by the server." }, "items": { "type": "object", - "description": "A map of scoped target vpn gateway lists.", + "description": "[Output Only] A map of scoped target pool lists.", "additionalProperties": { - "$ref": "TargetVpnGatewaysScopedList", - "description": "[Output Only] Name of the scope containing this set of target VPN gateways." + "$ref": "TargetPoolsScopedList", + "description": "Name of the scope containing this set of target pools." } }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", - "default": "compute#targetVpnGatewayAggregatedList" + "description": "[Output Only] Type of resource. Always compute#targetPoolAggregatedList for aggregated lists of target pools.", + "default": "compute#targetPoolAggregatedList" }, "nextPageToken": { "type": "string", @@ -5835,30 +6080,47 @@ }, "selfLink": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "description": "[Output Only] Server-defined URL for this resource." } } }, - "TargetVpnGatewayList": { - "id": "TargetVpnGatewayList", + "TargetPoolInstanceHealth": { + "id": "TargetPoolInstanceHealth", "type": "object", - "description": "Contains a list of TargetVpnGateway resources.", + "properties": { + "healthStatus": { + "type": "array", + "items": { + "$ref": "HealthStatus" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetPoolInstanceHealth when checking the health of an instance.", + "default": "compute#targetPoolInstanceHealth" + } + } + }, + "TargetPoolList": { + "id": "TargetPoolList", + "type": "object", + "description": "Contains a list of TargetPool resources.", "properties": { "id": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + "description": "[Output Only] Unique identifier for the resource. Defined by the server." 
}, "items": { "type": "array", - "description": "[Output Only] A list of TargetVpnGateway resources.", + "description": "A list of TargetPool resources.", "items": { - "$ref": "TargetVpnGateway" + "$ref": "TargetPool" } }, "kind": { "type": "string", - "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", - "default": "compute#targetVpnGatewayList" + "description": "[Output Only] Type of resource. Always compute#targetPoolList for lists of target pools.", + "default": "compute#targetPoolList" }, "nextPageToken": { "type": "string", @@ -5866,24 +6128,76 @@ }, "selfLink": { "type": "string", - "description": "[Output Only] Server-defined URL for the resource." + "description": "[Output Only] Server-defined URL for this resource." } } }, - "TargetVpnGatewaysScopedList": { - "id": "TargetVpnGatewaysScopedList", + "TargetPoolsAddHealthCheckRequest": { + "id": "TargetPoolsAddHealthCheckRequest", "type": "object", "properties": { - "targetVpnGateways": { + "healthChecks": { "type": "array", - "description": "[Output Only] List of target vpn gateways contained in this scope.", + "description": "A list of HttpHealthCheck resources to add to the target pool.", "items": { - "$ref": "TargetVpnGateway" + "$ref": "HealthCheckReference" + } + } + } + }, + "TargetPoolsAddInstanceRequest": { + "id": "TargetPoolsAddInstanceRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "A full or partial URL to an instance to add to this target pool. This can be a full or partial URL. For example, the following are valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project-id/zones/zone/instances/instance-name \n- projects/project-id/zones/zone/instances/instance-name \n- zones/zone/instances/instance-name", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "TargetPoolsRemoveHealthCheckRequest": { + "id": "TargetPoolsRemoveHealthCheckRequest", + "type": "object", + "properties": { + "healthChecks": { + "type": "array", + "description": "Health check URL to be removed. This can be a full or valid partial URL. 
For example, the following are valid URLs: \n- https://www.googleapis.com/compute/beta/projects/project/global/httpHealthChecks/health-check \n- projects/project/global/httpHealthChecks/health-check \n- global/httpHealthChecks/health-check", + "items": { + "$ref": "HealthCheckReference" + } + } + } + }, + "TargetPoolsRemoveInstanceRequest": { + "id": "TargetPoolsRemoveInstanceRequest", + "type": "object", + "properties": { + "instances": { + "type": "array", + "description": "URLs of the instances to be removed from target pool.", + "items": { + "$ref": "InstanceReference" + } + } + } + }, + "TargetPoolsScopedList": { + "id": "TargetPoolsScopedList", + "type": "object", + "properties": { + "targetPools": { + "type": "array", + "description": "List of target pools contained in this scope.", + "items": { + "$ref": "TargetPool" } }, "warning": { "type": "object", - "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", + "description": "Informational warning which replaces the list of addresses when the list is empty.", "properties": { "code": { "type": "string", @@ -5892,6 +6206,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -5920,6 +6235,7 @@ "", "", "", + "", "" ] }, @@ -5948,51 +6264,33 @@ } } }, - "TestFailure": { - "id": "TestFailure", + "TargetReference": { + "id": "TargetReference", "type": "object", "properties": { - "actualService": { - "type": "string" - }, - "expectedService": { - "type": "string" - }, - "host": { - "type": "string" - }, - "path": { + "target": { "type": "string" } } }, - "UrlMap": { - "id": "UrlMap", + "TargetVpnGateway": { + "id": "TargetVpnGateway", "type": "object", - "description": "A UrlMap resource. This resource defines the mapping from URL to the BackendService resource, based on the \"longest-match\" of the URL's host and path.", + "description": "Represents a Target VPN gateway resource.", "properties": { "creationTimestamp": { "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." }, - "defaultService": { - "type": "string", - "description": "The URL of the BackendService resource if none of the hostRules match." - }, "description": { "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." }, - "fingerprint": { - "type": "string", - "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap.", - "format": "byte" - }, - "hostRules": { + "forwardingRules": { "type": "array", - "description": "The list of HostRules to use against the URL.", + "description": "[Output Only] A list of URLs to the ForwardingRule resources. ForwardingRules are created using compute.forwardingRules.insert and associated to a VPN gateway.", "items": { - "$ref": "HostRule" + "type": "string" } }, "id": { @@ -6002,42 +6300,293 @@ }, "kind": { "type": "string", - "description": "[Output Only] Type of the resource. Always compute#urlMaps for url maps.", - "default": "compute#urlMap" + "description": "[Output Only] Type of resource. 
Always compute#targetVpnGateway for target VPN gateways.", + "default": "compute#targetVpnGateway" }, "name": { "type": "string", "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.targetVpnGateways.insert" + ] + } }, - "pathMatchers": { - "type": "array", - "description": "The list of named PathMatchers to use against the URL.", - "items": { - "$ref": "PathMatcher" + "network": { + "type": "string", + "description": "URL of the network to which this VPN gateway is attached. Provided by the client when the VPN gateway is created.", + "annotations": { + "required": [ + "compute.targetVpnGateways.insert" + ] } }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the target VPN gateway resides." + }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." }, - "tests": { - "type": "array", - "description": "The list of expected URL mappings. Request to update this UrlMap will succeed only if all of the test cases pass.", - "items": { - "$ref": "UrlMapTest" - } - } - } - }, - "UrlMapList": { - "id": "UrlMapList", - "type": "object", - "description": "Contains a list of UrlMap resources.", - "properties": { - "id": { + "status": { "type": "string", - "description": "[Output Only] Unique identifier for the resource. Set by the server." + "description": "[Output Only] The status of the VPN gateway.", + "enum": [ + "CREATING", + "DELETING", + "FAILED", + "READY" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, + "tunnels": { + "type": "array", + "description": "[Output Only] A list of URLs to VpnTunnel resources. VpnTunnels are created using compute.vpntunnels.insert method and associated to a VPN gateway.", + "items": { + "type": "string" + } + } + } + }, + "TargetVpnGatewayAggregatedList": { + "id": "TargetVpnGatewayAggregatedList", + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "A map of scoped target vpn gateway lists.", + "additionalProperties": { + "$ref": "TargetVpnGatewaysScopedList", + "description": "[Output Only] Name of the scope containing this set of target VPN gateways." + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", + "default": "compute#targetVpnGatewayAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." 
+ }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "TargetVpnGatewayList": { + "id": "TargetVpnGatewayList", + "type": "object", + "description": "Contains a list of TargetVpnGateway resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "[Output Only] A list of TargetVpnGateway resources.", + "items": { + "$ref": "TargetVpnGateway" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", + "default": "compute#targetVpnGatewayList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "TargetVpnGatewaysScopedList": { + "id": "TargetVpnGatewaysScopedList", + "type": "object", + "properties": { + "targetVpnGateways": { + "type": "array", + "description": "[Output Only] List of target vpn gateways contained in this scope.", + "items": { + "$ref": "TargetVpnGateway" + } + }, + "warning": { + "type": "object", + "description": "[Output Only] Informational warning which replaces the list of addresses when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." 
+ } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, + "TestFailure": { + "id": "TestFailure", + "type": "object", + "properties": { + "actualService": { + "type": "string" + }, + "expectedService": { + "type": "string" + }, + "host": { + "type": "string" + }, + "path": { + "type": "string" + } + } + }, + "UrlMap": { + "id": "UrlMap", + "type": "object", + "description": "A UrlMap resource. This resource defines the mapping from URL to the BackendService resource, based on the \"longest-match\" of the URL's host and path.", + "properties": { + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "defaultService": { + "type": "string", + "description": "The URL of the BackendService resource if none of the hostRules match." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "fingerprint": { + "type": "string", + "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap.", + "format": "byte" + }, + "hostRules": { + "type": "array", + "description": "The list of HostRules to use against the URL.", + "items": { + "$ref": "HostRule" + } + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of the resource. Always compute#urlMaps for url maps.", + "default": "compute#urlMap" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "pathMatchers": { + "type": "array", + "description": "The list of named PathMatchers to use against the URL.", + "items": { + "$ref": "PathMatcher" + } + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + }, + "tests": { + "type": "array", + "description": "The list of expected URL mappings. Request to update this UrlMap will succeed only if all of the test cases pass.", + "items": { + "$ref": "UrlMapTest" + } + } + } + }, + "UrlMapList": { + "id": "UrlMapList", + "type": "object", + "description": "Contains a list of UrlMap resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] Unique identifier for the resource. Set by the server." }, "items": { "type": "array", @@ -6210,8 +6759,19 @@ "type": "string", "description": "[Output Only] URL of the region where the VPN tunnel resides." }, - "selfLink": { - "type": "string", + "remoteTrafficSelector": { + "type": "array", + "description": "Remote traffic selectors to use when establishing the VPN tunnel with peer VPN gateway. 
The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint.", + "items": { + "type": "string" + } + }, + "router": { + "type": "string", + "description": "URL of router resource to be used for dynamic routing." + }, + "selfLink": { + "type": "string", "description": "[Output Only] Server-defined URL for the resource." }, "sharedSecret": { @@ -6349,6 +6909,7 @@ "CLEANUP_FAILED", "DEPRECATED_RESOURCE_USED", "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "FIELD_VALUE_OVERRIDEN", "INJECTED_KERNELS_DEPRECATED", "NEXT_HOP_ADDRESS_NOT_ASSIGNED", "NEXT_HOP_CANNOT_IP_FORWARD", @@ -6377,6 +6938,7 @@ "", "", "", + "", "" ] }, @@ -6501,7 +7063,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -6522,7 +7084,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -6555,7 +7117,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -6596,7 +7158,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -6631,7 +7193,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -6665,7 +7227,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -6686,7 +7248,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -6722,7 +7284,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -6743,7 +7305,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -6776,7 +7338,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -6817,7 +7379,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -6852,7 +7414,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -6886,7 +7448,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -6907,7 +7469,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -6948,7 +7510,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -6991,7 +7553,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7038,7 +7600,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7071,7 +7633,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7104,7 +7666,7 @@ "project": { "type": "string", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7134,7 +7696,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7160,7 +7722,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
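
The filter parameter documented in the descriptions above takes expressions of the form field_name comparison_string literal_string, where comparison_string is eq or ne, string literals are full-match RE2 patterns, and multiple parenthesized expressions are ANDed together. The following is a minimal, hypothetical sketch of passing such a filter through the generated Go client for this API (google.golang.org/api/compute/v1); the project and zone values are placeholders, and Application Default Credentials are assumed only for illustration.

// Hypothetical sketch: listing instances with a filter expression of the
// form described above. "my-project" and "us-central1-f" are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Any authenticated *http.Client works; Application Default
	// Credentials are used here purely as an example.
	hc, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(hc)
	if err != nil {
		log.Fatal(err)
	}

	// Single expression: exclude instances named example-instance.
	// Multiple parenthesized expressions would be ANDed, e.g.
	//   (scheduling.automaticRestart eq true) (zone eq us-central1-f)
	list, err := svc.Instances.List("my-project", "us-central1-f").
		Filter("name ne example-instance").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, inst := range list.Items {
		fmt.Println(inst.Name)
	}
}
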
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7181,7 +7743,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7214,7 +7776,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7250,7 +7812,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7281,7 +7843,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7302,7 +7864,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7335,7 +7897,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7368,7 +7930,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7389,7 +7951,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7425,7 +7987,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7446,7 +8008,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7479,7 +8041,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7516,14 +8078,13 @@ "type": "string", "description": "Name of the persistent disk to delete.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" }, "project": { "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7564,7 +8125,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7599,7 +8160,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "sourceImage": { @@ -7638,7 +8199,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7659,7 +8220,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7700,7 +8261,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7748,7 +8309,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7781,7 +8342,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7808,7 +8369,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7834,7 +8395,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7855,7 +8416,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7888,7 +8449,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7924,7 +8485,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7955,7 +8516,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7976,7 +8537,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8009,7 +8570,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -8050,7 +8611,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -8085,7 +8646,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -8119,7 +8680,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
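
Each "project" parameter in these hunks relaxes the second alternative of its pattern from [a-z](...) to [a-z0-9](...), so the name-style identifier may now begin with a digit as well as a letter, while the all-digit project-number and domain-scoped forms are unchanged. A small sketch of the effect, assuming the patterns are anchored for a full match (the API validates the entire value); the candidate strings are made-up examples, not identifiers taken from this change.

// Hypothetical comparison of the old and new "project" patterns against a
// few made-up identifiers. Both regexps are copied from the diff above and
// anchored so they must match the whole value.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	oldPat := regexp.MustCompile(`^(?:(?:[-a-z0-9]{1,63}\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))$`)
	newPat := regexp.MustCompile(`^(?:(?:[-a-z0-9]{1,63}\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))$`)

	for _, p := range []string{
		"my-project",             // letter-initial name: accepted by both
		"123456789012",           // bare project number: accepted by both
		"0-example",              // digit-initial name: only the new pattern accepts it
		"example.com:my-project", // domain-scoped ID: accepted by both
	} {
		fmt.Printf("%-24s old=%v new=%v\n", p, oldPat.MatchString(p), newPat.MatchString(p))
	}
}
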
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8140,7 +8701,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -8181,7 +8742,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -8229,7 +8790,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8262,7 +8823,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8289,7 +8850,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8315,7 +8876,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8336,7 +8897,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8373,7 +8934,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8406,7 +8967,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8433,7 +8994,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8459,7 +9020,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8480,7 +9041,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8513,7 +9074,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8544,7 +9105,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8565,7 +9126,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8598,7 +9159,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8628,7 +9189,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8653,7 +9214,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8674,7 +9235,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8711,7 +9272,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8744,7 +9305,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8771,7 +9332,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8797,7 +9358,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
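A minimal sketch, not part of this change, of how the filter grammar described in these entries (field_name eq|ne literal, RE2 literals matching the whole field, parenthesized expressions ANDed together) is typically exercised from the generated google.golang.org/api/compute/v1 Go client via Instances.List(...).Filter(...); it assumes Application Default Credentials, and my-project / us-central1-f are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	// Application Default Credentials (GOOGLE_APPLICATION_CREDENTIALS or a GCE/GKE environment).
	client, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Two parenthesized expressions are ANDed together, as the description states;
	// "name ne example-instance" uses an RE2 literal that must match the entire field.
	filter := "(scheduling.automaticRestart eq true) (name ne example-instance)"
	list, err := svc.Instances.List("my-project", "us-central1-f").Filter(filter).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, inst := range list.Items {
		fmt.Println(inst.Name, inst.Status)
	}
}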
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8818,7 +9379,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8851,7 +9412,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8887,7 +9448,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8927,7 +9488,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8960,7 +9521,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8987,7 +9548,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9013,7 +9574,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9034,7 +9595,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9067,7 +9628,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9103,7 +9664,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9143,7 +9704,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9176,7 +9737,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9212,7 +9773,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9237,7 +9798,7 @@ "parameters": { "family": { "type": "string", - "description": "Name of the image resource to return.", + "description": "Name of the image family to search for.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -9246,7 +9807,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9273,7 +9834,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9298,11 +9859,11 @@ "id": "compute.images.list", "path": "{project}/global/images", "httpMethod": "GET", - "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 7. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.\n\nSee Accessing images for more information.", + "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9323,7 +9884,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9359,7 +9920,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9393,7 +9954,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9414,7 +9975,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9446,7 +10007,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9485,7 +10046,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9527,7 +10088,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9561,7 +10122,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9594,7 +10155,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9615,7 +10176,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9654,7 +10215,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9694,7 +10255,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9736,7 +10297,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "size": { @@ -9783,7 +10344,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9825,7 +10386,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9871,7 +10432,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9905,7 +10466,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9926,7 +10487,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9958,7 +10519,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9997,7 +10558,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10031,7 +10592,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10064,7 +10625,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -10085,7 +10646,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10116,7 +10677,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "instanceGroup": { @@ -10143,7 +10704,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10186,7 +10747,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10228,7 +10789,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10275,7 +10836,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -10308,7 +10869,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -10335,7 +10896,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -10361,7 +10922,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -10382,7 +10943,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -10425,7 +10986,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10461,7 +11022,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -10482,7 +11043,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -10515,7 +11076,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10559,7 +11120,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10612,7 +11173,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10648,7 +11209,6 @@ "type": "string", "description": "Disk device name to detach.", "required": true, - "pattern": "\\w[\\w.-]{0,254}", "location": "query" }, "instance": { @@ -10662,7 +11222,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10704,7 +11264,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10755,7 +11315,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10790,7 +11350,7 @@ "type": 
"string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10824,7 +11384,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -10845,7 +11405,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10886,7 +11446,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10940,7 +11500,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10983,7 +11543,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11027,7 +11587,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11071,7 +11631,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11115,7 +11675,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11159,7 +11719,48 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this 
request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "startWithEncryptionKey": { + "id": "compute.instances.startWithEncryptionKey", + "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + "httpMethod": "POST", + "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to start.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11175,6 +11776,9 @@ "zone", "instance" ], + "request": { + "$ref": "InstancesStartWithEncryptionKeyRequest" + }, "response": { "$ref": "Operation" }, @@ -11200,7 +11804,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11245,81 +11849,507 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "license" + ], + "response": { + "$ref": "License" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "machineTypes": { + "methods": { + "aggregatedList": { + "id": "compute.machineTypes.aggregatedList", + "path": "{project}/aggregated/machineTypes", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of machine types.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "MachineTypeAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "get": { + "id": "compute.machineTypes.get", + "path": "{project}/zones/{zone}/machineTypes/{machineType}", + "httpMethod": "GET", + "description": "Returns the specified machine type. Get a list of available machine types by making a list() request.", + "parameters": { + "machineType": { + "type": "string", + "description": "Name of the machine type to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "machineType" + ], + "response": { + "$ref": "MachineType" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "id": "compute.machineTypes.list", + "path": "{project}/zones/{zone}/machineTypes", + "httpMethod": "GET", + "description": "Retrieves a list of machine types available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone" + ], + "response": { + "$ref": "MachineTypeList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "networks": { + "methods": { + "delete": { + "id": "compute.networks.delete", + "path": "{project}/global/networks/{network}", + "httpMethod": "DELETE", + "description": "Deletes the specified network.", + "parameters": { + "network": { + "type": "string", + "description": "Name of the network to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "network" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.networks.get", + "path": "{project}/global/networks/{network}", + "httpMethod": "GET", + "description": "Returns the specified network. 
Get a list of available networks by making a list() request.", + "parameters": { + "network": { + "type": "string", + "description": "Name of the network to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "network" + ], + "response": { + "$ref": "Network" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.networks.insert", + "path": "{project}/global/networks", + "httpMethod": "POST", + "description": "Creates a network in the specified project using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Network" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.networks.list", + "path": "{project}/global/networks", + "httpMethod": "GET", + "description": "Retrieves the list of networks available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "NetworkList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "projects": { + "methods": { + "get": { + "id": "compute.projects.get", + "path": "{project}", + "httpMethod": "GET", + "description": "Returns the specified Project resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "Project" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "moveDisk": { + "id": "compute.projects.moveDisk", + "path": "{project}/moveDisk", + "httpMethod": "POST", + "description": "Moves a persistent disk from one zone to another.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "DiskMoveRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "moveInstance": { + "id": "compute.projects.moveInstance", + "path": "{project}/moveInstance", + "httpMethod": "POST", + "description": "Moves an instance and its attached persistent disks from one zone to another.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "InstanceMoveRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setCommonInstanceMetadata": { + "id": "compute.projects.setCommonInstanceMetadata", + "path": "{project}/setCommonInstanceMetadata", + "httpMethod": "POST", + "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + "parameters": 
{ + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "request": { + "$ref": "Metadata" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "setUsageExportBucket": { + "id": "compute.projects.setUsageExportBucket", + "path": "{project}/setUsageExportBucket", + "httpMethod": "POST", + "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, "parameterOrder": [ - "project", - "license" + "project" ], + "request": { + "$ref": "UsageExportLocation" + }, "response": { - "$ref": "License" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" ] } } }, - "machineTypes": { + "regionOperations": { "methods": { - "aggregatedList": { - "id": "compute.machineTypes.aggregatedList", - "path": "{project}/aggregated/machineTypes", - "httpMethod": "GET", - "description": "Retrieves an aggregated list of machine types.", + "delete": { + "id": "compute.regionOperations.delete", + "path": "{project}/regions/{region}/operations/{operation}", + "httpMethod": "DELETE", + "description": "Deletes the specified region-specific Operations resource.", "parameters": { - "filter": { - "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - "default": "500", - "format": "uint32", - "minimum": "0", - "maximum": "500", - "location": "query" - }, - "pageToken": { + "operation": { "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" + "description": "Name of the Operations resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, "project": { "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ - "project" + "project", + "region", + "operation" ], - "response": { - "$ref": "MachineTypeAggregatedList" - }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "get": { - "id": "compute.machineTypes.get", - "path": "{project}/zones/{zone}/machineTypes/{machineType}", + "id": "compute.regionOperations.get", + "path": "{project}/regions/{region}/operations/{operation}", "httpMethod": "GET", - "description": "Returns the specified machine type. 
Get a list of available machine types by making a list() request.", + "description": "Retrieves the specified region-specific Operations resource.", "parameters": { - "machineType": { + "operation": { "type": "string", - "description": "Name of the machine type to return.", + "description": "Name of the Operations resource to return.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -11328,12 +12358,12 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "zone": { + "region": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the region for this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -11341,11 +12371,11 @@ }, "parameterOrder": [ "project", - "zone", - "machineType" + "region", + "operation" ], "response": { - "$ref": "MachineType" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11354,14 +12384,14 @@ ] }, "list": { - "id": "compute.machineTypes.list", - "path": "{project}/zones/{zone}/machineTypes", + "id": "compute.regionOperations.list", + "path": "{project}/regions/{region}/operations", "httpMethod": "GET", - "description": "Retrieves a list of machine types available to the specified project.", + "description": "Retrieves a list of Operation resources contained within the specified region.", "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -11382,12 +12412,12 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "zone": { + "region": { "type": "string", - "description": "The name of the zone for this request.", + "description": "Name of the region for this request.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -11395,10 +12425,10 @@ }, "parameterOrder": [ "project", - "zone" + "region" ], "response": { - "$ref": "MachineTypeList" + "$ref": "OperationList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11408,68 +12438,35 @@ } } }, - "networks": { + "regions": { "methods": { - "delete": { - "id": "compute.networks.delete", - "path": "{project}/global/networks/{network}", - "httpMethod": "DELETE", - "description": "Deletes the specified network.", - "parameters": { - "network": { - "type": "string", - "description": "Name of the network to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project", - "network" - ], - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, "get": { - "id": "compute.networks.get", - "path": "{project}/global/networks/{network}", + "id": "compute.regions.get", + "path": "{project}/regions/{region}", "httpMethod": "GET", - "description": "Returns the specified network. Get a list of available networks by making a list() request.", + "description": "Returns the specified Region resource. 
Get a list of available regions by making a list() request.", "parameters": { - "network": { + "project": { "type": "string", - "description": "Name of the network to return.", + "description": "Project ID for this request.", "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, - "project": { + "region": { "type": "string", - "description": "Project ID for this request.", + "description": "Name of the region resource to return.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ "project", - "network" + "region" ], "response": { - "$ref": "Network" + "$ref": "Region" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11477,43 +12474,15 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, - "insert": { - "id": "compute.networks.insert", - "path": "{project}/global/networks", - "httpMethod": "POST", - "description": "Creates a network in the specified project using the data included in the request.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "Network" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, "list": { - "id": "compute.networks.list", - "path": "{project}/global/networks", + "id": "compute.regions.list", + "path": "{project}/regions", "httpMethod": "GET", - "description": "Retrieves the list of networks available to the specified project.", + "description": "Retrieves the list of region resources available to the specified project.", "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -11534,7 +12503,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -11542,7 +12511,7 @@ "project" ], "response": { - "$ref": "NetworkList" + "$ref": "RegionList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -11552,110 +12521,86 @@ } } }, - "projects": { + "routers": { "methods": { - "get": { - "id": "compute.projects.get", - "path": "{project}", + "aggregatedList": { + "id": "compute.routers.aggregatedList", + "path": "{project}/aggregated/routers", "httpMethod": "GET", - "description": "Returns the specified Project resource.", - "parameters": { - "project": { - "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "response": { - "$ref": "Project" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" - ] - }, - "moveDisk": { - "id": "compute.projects.moveDisk", - "path": "{project}/moveDisk", - "httpMethod": "POST", - "description": "Moves a persistent disk from one zone to another.", + "description": "Retrieves an aggregated list of routers.", "parameters": { - "project": { + "filter": { "type": "string", - "description": "Project ID for this request.", - "required": true, - "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "location": "path" - } - }, - "parameterOrder": [ - "project" - ], - "request": { - "$ref": "DiskMoveRequest" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - }, - "moveInstance": { - "id": "compute.projects.moveInstance", - "path": "{project}/moveInstance", - "httpMethod": "POST", - "description": "Moves an instance and its attached persistent disks from one zone to another.", - "parameters": { + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, "project": { "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, "parameterOrder": [ "project" ], - "request": { - "$ref": "InstanceMoveRequest" - }, "response": { - "$ref": "Operation" + "$ref": "RouterAggregatedList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "setCommonInstanceMetadata": { - "id": "compute.projects.setCommonInstanceMetadata", - "path": "{project}/setCommonInstanceMetadata", - "httpMethod": "POST", - "description": "Sets metadata common to all instances within the specified project using the data included in the request.", + "delete": { + "id": "compute.routers.delete", + "path": "{project}/regions/{region}/routers/{router}", + "httpMethod": "DELETE", + "description": "Deletes the specified Router resource.", "parameters": { "project": { "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "router": { + "type": "string", + "description": "Name of the Router resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ - "project" + "project", + "region", + "router" ], - "request": { - "$ref": "Metadata" - }, "response": { "$ref": "Operation" }, @@ -11664,59 +12609,59 @@ "https://www.googleapis.com/auth/compute" ] }, - "setUsageExportBucket": { - "id": "compute.projects.setUsageExportBucket", - "path": "{project}/setUsageExportBucket", - "httpMethod": "POST", - "description": "Enables the usage export feature and sets the usage export bucket where reports are stored. If you provide an empty request body using this method, the usage export feature will be disabled.", + "get": { + "id": "compute.routers.get", + "path": "{project}/regions/{region}/routers/{router}", + "httpMethod": "GET", + "description": "Returns the specified Router resource. 
Get a list of available routers by making a list() request.", "parameters": { "project": { "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "router": { + "type": "string", + "description": "Name of the Router resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ - "project" + "project", + "region", + "router" ], - "request": { - "$ref": "UsageExportLocation" - }, "response": { - "$ref": "Operation" + "$ref": "Router" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_only", - "https://www.googleapis.com/auth/devstorage.read_write" + "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "regionOperations": { - "methods": { - "delete": { - "id": "compute.regionOperations.delete", - "path": "{project}/regions/{region}/operations/{operation}", - "httpMethod": "DELETE", - "description": "Deletes the specified region-specific Operations resource.", + }, + "getRouterStatus": { + "id": "compute.routers.getRouterStatus", + "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", + "httpMethod": "GET", + "description": "Retrieves runtime information of the specified router.", "parameters": { - "operation": { - "type": "string", - "description": "Name of the Operations resource to delete.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "project": { "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -11725,36 +12670,40 @@ "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" + }, + "router": { + "type": "string", + "description": "Name of the Router resource to query.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" } }, "parameterOrder": [ "project", "region", - "operation" + "router" ], + "response": { + "$ref": "RouterStatusResponse" + }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" ] }, - "get": { - "id": "compute.regionOperations.get", - "path": "{project}/regions/{region}/operations/{operation}", - "httpMethod": "GET", - "description": "Retrieves the specified region-specific Operations resource.", + "insert": { + "id": "compute.routers.insert", + "path": "{project}/regions/{region}/routers", + "httpMethod": "POST", + "description": "Creates a Router resource in the specified project and 
region using the data included in the request.", "parameters": { - "operation": { - "type": "string", - "description": "Name of the Operations resource to return.", - "required": true, - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "location": "path" - }, "project": { "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -11767,27 +12716,28 @@ }, "parameterOrder": [ "project", - "region", - "operation" + "region" ], + "request": { + "$ref": "Router" + }, "response": { "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, "list": { - "id": "compute.regionOperations.list", - "path": "{project}/regions/{region}/operations", + "id": "compute.routers.list", + "path": "{project}/regions/{region}/routers", "httpMethod": "GET", - "description": "Retrieves a list of Operation resources contained within the specified region.", + "description": "Retrieves a list of Router resources available to the specified project.", "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -11808,7 +12758,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -11824,34 +12774,37 @@ "region" ], "response": { - "$ref": "OperationList" + "$ref": "RouterList" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] - } - } - }, - "regions": { - "methods": { - "get": { - "id": "compute.regions.get", - "path": "{project}/regions/{region}", - "httpMethod": "GET", - "description": "Returns the specified Region resource. Get a list of available regions by making a list() request.", + }, + "patch": { + "id": "compute.routers.patch", + "path": "{project}/regions/{region}/routers/{router}", + "httpMethod": "PATCH", + "description": "Updates the entire content of the Router resource. 
This method supports patch semantics.", "parameters": { "project": { "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { "type": "string", - "description": "Name of the region resource to return.", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "router": { + "type": "string", + "description": "Name of the Router resource to update.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -11859,61 +12812,108 @@ }, "parameterOrder": [ "project", - "region" + "region", + "router" ], + "request": { + "$ref": "Router" + }, "response": { - "$ref": "Region" + "$ref": "Operation" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/compute.readonly" + "https://www.googleapis.com/auth/compute" ] }, - "list": { - "id": "compute.regions.list", - "path": "{project}/regions", - "httpMethod": "GET", - "description": "Retrieves the list of region resources available to the specified project.", + "preview": { + "id": "compute.routers.preview", + "path": "{project}/regions/{region}/routers/{router}/preview", + "httpMethod": "POST", + "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", "parameters": { - "filter": { + "project": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", - "location": "query" - }, - "maxResults": { - "type": "integer", - "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", - "default": "500", - "format": "uint32", - "minimum": "0", - "maximum": "500", - "location": "query" + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" }, - "pageToken": { + "region": { "type": "string", - "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", - "location": "query" + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" }, - "project": { + "router": { "type": "string", - "description": "Project ID for this request.", + "description": "Name of the Router resource to query.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" } }, "parameterOrder": [ - "project" + "project", + "region", + "router" ], + "request": { + "$ref": "Router" + }, "response": { - "$ref": "RegionList" + "$ref": "RoutersPreviewResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "update": { + "id": "compute.routers.update", + "path": "{project}/regions/{region}/routers/{router}", + "httpMethod": "PUT", + "description": "Updates the entire content of the Router resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "router": { + "type": "string", + "description": "Name of the Router resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "router" + ], + "request": { + "$ref": "Router" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -11929,7 +12929,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "route": { @@ -11962,7 +12962,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", 
"location": "path" }, "route": { @@ -11996,7 +12996,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12022,7 +13022,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12043,7 +13043,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12073,7 +13073,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "snapshot": { @@ -12106,7 +13106,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "snapshot": { @@ -12138,7 +13138,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12159,7 +13159,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12189,7 +13189,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "sslCertificate": { @@ -12222,7 +13222,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "sslCertificate": { @@ -12256,7 +13256,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12282,7 +13282,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12303,7 +13303,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12331,7 +13331,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12352,7 +13352,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12378,7 +13378,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12419,7 +13419,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12461,7 +13461,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12495,7 +13495,7 @@ "parameters": { "filter": 
{ "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12516,7 +13516,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12554,7 +13554,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpProxy": { @@ -12587,7 +13587,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpProxy": { @@ -12621,7 +13621,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12647,7 +13647,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12668,7 +13668,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12694,7 +13694,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpProxy": { @@ -12734,7 +13734,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpsProxy": { @@ -12767,7 +13767,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpsProxy": { @@ -12801,7 +13801,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12827,7 +13827,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form 
filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12848,7 +13848,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12874,7 +13874,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpsProxy": { @@ -12910,7 +13910,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpsProxy": { @@ -12948,7 +13948,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12969,7 +13969,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12995,7 +13995,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetInstance": { @@ -13036,7 +14036,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetInstance": { @@ -13078,7 +14078,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -13112,7 +14112,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13133,7 +14133,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -13171,7 +14171,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13215,7 +14215,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13257,7 +14257,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13278,7 +14278,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -13304,7 +14304,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13345,7 +14345,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13387,7 +14387,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13432,7 +14432,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13466,7 +14466,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13487,7 +14487,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13521,7 +14521,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13565,7 +14565,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13615,7 +14615,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13661,7 +14661,7 @@ "parameters": { 
"filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13682,7 +14682,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -13708,7 +14708,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13749,7 +14749,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13791,7 +14791,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13825,7 +14825,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13846,7 +14846,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13884,7 +14884,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "urlMap": { @@ -13917,7 +14917,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "urlMap": { @@ -13951,7 +14951,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -13969,6 +14969,42 @@ "https://www.googleapis.com/auth/compute" ] }, + "invalidateCache": { + "id": "compute.urlMaps.invalidateCache", + "path": "{project}/global/urlMaps/{urlMap}/invalidateCache", + "httpMethod": "POST", + "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "urlMap": { + "type": "string", + "description": "Name of the UrlMap scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "urlMap" + ], + "request": { + "$ref": "CacheInvalidationRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "list": { "id": "compute.urlMaps.list", "path": "{project}/global/urlMaps", @@ -13977,7 +15013,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13998,7 +15034,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -14024,7 +15060,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "urlMap": { @@ -14060,7 +15096,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "urlMap": { @@ -14096,7 +15132,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "urlMap": { @@ -14134,7 +15170,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
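The compute.urlMaps.invalidateCache method added a little above takes a CacheInvalidationRule body and returns an Operation. The corresponding Go method is not visible in this hunk, so the sketch below assumes the regenerated client exposes it as UrlMaps.InvalidateCache with the usual (project, resource, body) signature; all names are placeholders.

package example

import (
	compute "google.golang.org/api/compute/v1"
)

// invalidateImagesPath requests invalidation of cached entries under
// /images/*, scoped to one UrlMap. svc is an already-constructed
// *compute.Service; the project and URL map names are placeholders, and
// the returned Operation can be polled via svc.GlobalOperations.Get.
func invalidateImagesPath(svc *compute.Service) (*compute.Operation, error) {
	rule := &compute.CacheInvalidationRule{Path: "/images/*"}
	return svc.UrlMaps.InvalidateCache("my-project", "my-url-map", rule).Do()
}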
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14155,7 +15191,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -14181,7 +15217,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14222,7 +15258,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14264,7 +15300,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14298,7 +15334,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14319,7 +15355,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14364,7 +15400,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -14402,7 +15438,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -14435,7 +15471,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14456,7 +15492,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -14494,7 +15530,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -14526,7 +15562,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14547,7 +15583,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index cf7442c2fdbd..188ce80cb440 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -94,6 +94,7 @@ func New(client *http.Client) (*Service, error) { s.Projects = NewProjectsService(s) s.RegionOperations = NewRegionOperationsService(s) s.Regions = NewRegionsService(s) + s.Routers = NewRoutersService(s) s.Routes = NewRoutesService(s) s.Snapshots = NewSnapshotsService(s) s.SslCertificates = NewSslCertificatesService(s) @@ -161,6 +162,8 @@ type Service struct { Regions *RegionsService + Routers *RoutersService + Routes *RoutesService Snapshots *SnapshotsService @@ -402,6 +405,15 @@ type RegionsService struct { s *Service } +func NewRoutersService(s *Service) *RoutersService { + rs := &RoutersService{s: s} + return rs +} + +type RoutersService struct { + s *Service +} + func NewRoutesService(s *Service) *RoutesService { rs := &RoutesService{s: s} return rs @@ -520,7 +532,7 @@ type ZonesService struct { } // AccessConfig: An access configuration attached to an instance's -// network interface. +// network interface. Only one access config per instance is supported. type AccessConfig struct { // Kind: [Output Only] Type of the resource. Always compute#accessConfig // for access configs. @@ -747,6 +759,7 @@ type AddressesScopedListWarning struct { // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -837,6 +850,29 @@ type AttachedDisk struct { // persistent disks. DeviceName string `json:"deviceName,omitempty"` + // DiskEncryptionKey: Encrypts or decrypts a disk using a + // customer-supplied encryption key. + // + // If you are creating a new disk, this field encrypts the new disk + // using an encryption key that you provide. 
If you are attaching an + // existing disk that is already encrypted, this field decrypts the disk + // using the customer-supplied encryption key. + // + // If you encrypt a disk using a customer-supplied key, you must provide + // the same key again when you attempt to use this resource at a later + // time. For example, you must provide the key when you create a + // snapshot or an image from the disk or when you attach the disk to a + // virtual machine instance. + // + // If you do not provide an encryption key, then the disk will be + // encrypted using an automatically generated key and you do not need to + // provide a key to use the disk later. + // + // Instance templates do not store customer-supplied encryption keys, so + // you cannot use your own keys to encrypt disks in a managed instance + // group. + DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` + // Index: Assigns a zero-based index to this disk, where 0 is reserved // for the boot disk. For example, if you have many disks attached to an // instance, each disk would have a unique index number. If not @@ -882,7 +918,8 @@ type AttachedDisk struct { // Source: Specifies a valid partial or full URL to an existing // Persistent Disk resource. This field is only applicable for - // persistent disks. + // persistent disks. Note that for InstanceTemplate, it is just disk + // name, not URL for the disk. Source string `json:"source,omitempty"` // Type: Specifies the type of the disk, either SCRATCH or PERSISTENT. @@ -937,7 +974,8 @@ type AttachedDiskInitializeParams struct { // - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType // - projects/project/zones/zone/diskTypes/diskType - // - zones/zone/diskTypes/diskType + // - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this + // is the name of the disk type, not URL. DiskType string `json:"diskType,omitempty"` // SourceImage: The source image used to create this disk. If the source @@ -968,6 +1006,15 @@ type AttachedDiskInitializeParams struct { // global/images/family/my-private-family SourceImage string `json:"sourceImage,omitempty"` + // SourceImageEncryptionKey: The customer-supplied encryption key of the + // source image. Required if the source image is protected by a + // customer-supplied encryption key. + // + // Instance templates do not store customer-supplied encryption keys, so + // you cannot create disks for instances in a managed instance group if + // the source images are encrypted with your own keys. + SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` + // ForceSendFields is a list of field names (e.g. "DiskName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -994,7 +1041,7 @@ type Autoscaler struct { // loadBalancingUtilization. // // If none of these are specified, the default will be to autoscale - // based on cpuUtilization to 0.8 or 80%. + // based on cpuUtilization to 0.6 or 60%. AutoscalingPolicy *AutoscalingPolicy `json:"autoscalingPolicy,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -1029,7 +1076,8 @@ type Autoscaler struct { // scale. Target string `json:"target,omitempty"` - // Zone: [Output Only] URL of the zone where the instance group resides. + // Zone: [Output Only] URL of the zone where the instance group resides + // (for autoscalers living in zonal scope). 
Zone string `json:"zone,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -1171,6 +1219,7 @@ type AutoscalersScopedListWarning struct { // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -1297,7 +1346,7 @@ func (s *AutoscalingPolicy) MarshalJSON() ([]byte, error) { type AutoscalingPolicyCpuUtilization struct { // UtilizationTarget: The target CPU utilization that the autoscaler // should maintain. Must be a float value in the range (0, 1]. If not - // specified, the default is 0.8. + // specified, the default is 0.6. // // If the CPU level is below the target utilization, the autoscaler // scales down the number of instances until it reaches the minimum @@ -1408,6 +1457,8 @@ type Backend struct { // global HTTP(S) load balancing, the default is UTILIZATION. Valid // values are UTILIZATION and RATE. // + // This cannot be used for internal load balancing. + // // Possible values: // "RATE" // "UTILIZATION" @@ -1419,6 +1470,8 @@ type Backend struct { // (depending on balancingMode). A setting of 0 means the group is // completely drained, offering 0% of its available CPU or RPS. Valid // range is [0.0,1.0]. + // + // This cannot be used for internal load balancing. CapacityScaler float64 `json:"capacityScaler,omitempty"` // Description: An optional description of this resource. Provide this @@ -1434,23 +1487,33 @@ type Backend struct { // // Note that you must specify an Instance Group resource using the // fully-qualified URL, rather than a partial URL. + // + // When the BackendService has load balancing scheme INTERNAL, the + // instance group must be in a zone within the same region as the + // BackendService. Group string `json:"group,omitempty"` // MaxRate: The max requests per second (RPS) of the group. Can be used // with either RATE or UTILIZATION balancing modes, but required if RATE // mode. For RATE mode, either maxRate or maxRatePerInstance must be // set. + // + // This cannot be used for internal load balancing. MaxRate int64 `json:"maxRate,omitempty"` // MaxRatePerInstance: The max requests per second (RPS) that a single // backend instance can handle.This is used to calculate the capacity of // the group. Can be used in either balancing mode. For RATE mode, // either maxRate or maxRatePerInstance must be set. + // + // This cannot be used for internal load balancing. MaxRatePerInstance float64 `json:"maxRatePerInstance,omitempty"` // MaxUtilization: Used when balancingMode is UTILIZATION. This ratio // defines the CPU utilization target for the group. The default is 0.8. // Valid range is [0.0, 1.0]. + // + // This cannot be used for internal load balancing. MaxUtilization float64 `json:"maxUtilization,omitempty"` // ForceSendFields is a list of field names (e.g. "BalancingMode") to @@ -1471,6 +1534,14 @@ func (s *Backend) MarshalJSON() ([]byte, error) { // BackendService: A BackendService resource. This resource defines a // group of backend virtual machines and their serving capacity. type BackendService struct { + // AffinityCookieTtlSec: Lifetime of cookies in seconds if + // session_affinity is GENERATED_COOKIE. If set to 0, the cookie is + // non-persistent and lasts only until the end of the browser session + // (or equivalent). The maximum allowed value for TTL is one day. + // + // When the load balancing scheme is INTERNAL, this field is not used. 
+ AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` + // Backends: The list of backends that serve this BackendService. Backends []*Backend `json:"backends,omitempty"` @@ -1482,6 +1553,11 @@ type BackendService struct { // property when you create the resource. Description string `json:"description,omitempty"` + // EnableCDN: If true, enable Cloud CDN for this BackendService. + // + // When the load balancing scheme is INTERNAL, this field is not used. + EnableCDN bool `json:"enableCDN,omitempty"` + // Fingerprint: Fingerprint of this resource. A hash of the contents // stored in this object. This field is used in optimistic locking. This // field will be ignored when inserting a BackendService. An up-to-date @@ -1492,6 +1568,9 @@ type BackendService struct { // HttpsHealthCheck resource for health checking this BackendService. // Currently at most one health check can be specified, and a health // check is required. + // + // For internal load balancing, a URL to a HealthCheck resource must be + // specified instead. HealthChecks []string `json:"healthChecks,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -1513,16 +1592,25 @@ type BackendService struct { // Port: Deprecated in favor of portName. The TCP port to connect on the // backend. The default value is 80. + // + // This cannot be used for internal load balancing. Port int64 `json:"port,omitempty"` // PortName: Name of backend port. The same name should appear in the - // instance groups referenced by this service. Required. + // instance groups referenced by this service. Required when the load + // balancing scheme is EXTERNAL. + // + // When the load balancing scheme is INTERNAL, this field is not used. PortName string `json:"portName,omitempty"` // Protocol: The protocol this BackendService uses to communicate with // backends. // - // Possible values are HTTP, HTTPS, HTTP2, TCP and SSL. + // Possible values are HTTP, HTTPS, HTTP2, TCP and SSL. The default is + // HTTP. + // + // For internal load balancing, the possible values are TCP and UDP, and + // the default is TCP. // // Possible values: // "HTTP" @@ -1537,6 +1625,24 @@ type BackendService struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SessionAffinity: Type of session affinity to use. The default is + // NONE. + // + // When the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, + // or GENERATED_COOKIE. + // + // When the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, + // CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO. + // + // When the protocol is UDP, this field is not used. + // + // Possible values: + // "CLIENT_IP" + // "CLIENT_IP_PROTO" + // "GENERATED_COOKIE" + // "NONE" + SessionAffinity string `json:"sessionAffinity,omitempty"` + // TimeoutSec: How many seconds to wait for the backend before // considering it a failed request. Default is 30 seconds. TimeoutSec int64 `json:"timeoutSec,omitempty"` @@ -1545,12 +1651,13 @@ type BackendService struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Backends") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. 
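To show how the new BackendService fields documented above fit together (SessionAffinity, AffinityCookieTtlSec, EnableCDN), here is a hedged sketch of creating an HTTP backend service with cookie-based affinity; every name and URL is a placeholder and svc is assumed to be a configured *compute.Service.

package example

import (
	compute "google.golang.org/api/compute/v1"
)

// createAffinityBackendService creates an HTTP backend service that pins
// clients to a backend via a generated cookie with a one-hour lifetime.
// All names and URLs are placeholders.
func createAffinityBackendService(svc *compute.Service) (*compute.Operation, error) {
	bs := &compute.BackendService{
		Name:                 "web-backend",
		Protocol:             "HTTP",
		PortName:             "http",
		TimeoutSec:           30,
		SessionAffinity:      "GENERATED_COOKIE",
		AffinityCookieTtlSec: 3600,
		EnableCDN:            true,
		HealthChecks: []string{
			"https://www.googleapis.com/compute/v1/projects/my-project/global/httpHealthChecks/my-check",
		},
		Backends: []*compute.Backend{{
			Group: "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-f/instanceGroups/my-group",
		}},
	}
	return svc.BackendServices.Insert("my-project", bs).Do()
}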
+ // ForceSendFields is a list of field names (e.g. + // "AffinityCookieTtlSec") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` } @@ -1588,8 +1695,8 @@ func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { // BackendServiceList: Contains a list of BackendService resources. type BackendServiceList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` // Items: A list of BackendService resources. @@ -1599,12 +1706,8 @@ type BackendServiceList struct { // compute#backendServiceList for lists of backend services. Kind string `json:"kind,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. + // NextPageToken: [Output Only] A token used to continue a truncated + // list request. NextPageToken string `json:"nextPageToken,omitempty"` // SelfLink: [Output Only] Server-defined URL for this resource. @@ -1629,6 +1732,74 @@ func (s *BackendServiceList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields) } +type CacheInvalidationRule struct { + Path string `json:"path,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Path") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *CacheInvalidationRule) MarshalJSON() ([]byte, error) { + type noMethod CacheInvalidationRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// CustomerEncryptionKey: Represents a customer-supplied encryption key +type CustomerEncryptionKey struct { + // RawKey: Specifies a 256-bit customer-supplied encryption key, encoded + // in RFC 4648 base64 to either encrypt or decrypt this resource. + RawKey string `json:"rawKey,omitempty"` + + // Sha256: [Output only] The RFC 4648 base64 encoded SHA-256 hash of the + // customer-supplied encryption key that protects this resource. + Sha256 string `json:"sha256,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RawKey") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *CustomerEncryptionKey) MarshalJSON() ([]byte, error) { + type noMethod CustomerEncryptionKey + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type CustomerEncryptionKeyProtectedDisk struct { + // DiskEncryptionKey: Decrypts data associated with the disk with a + // customer-supplied encryption key. + DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` + + // Source: Specifies a valid partial or full URL to an existing + // Persistent Disk resource. This field is only applicable for + // persistent disks. + Source string `json:"source,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskEncryptionKey") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *CustomerEncryptionKeyProtectedDisk) MarshalJSON() ([]byte, error) { + type noMethod CustomerEncryptionKeyProtectedDisk + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + // DeprecationStatus: Deprecation status for a public resource. type DeprecationStatus struct { // Deleted: An optional RFC3339 timestamp on or after which the @@ -1686,6 +1857,22 @@ type Disk struct { // property when you create the resource. Description string `json:"description,omitempty"` + // DiskEncryptionKey: Encrypts the disk using a customer-supplied + // encryption key. + // + // After you encrypt a disk with a customer-supplied key, you must + // provide the same key if you use the disk later (e.g. to create a disk + // snapshot or an image, or to attach the disk to a virtual + // machine). + // + // Customer-supplied encryption keys do not protect access to metadata + // of the disk. + // + // If you do not provide an encryption key when creating the disk, then + // the disk will be encrypted using an automatically generated key and + // you do not need to provide a key to use the disk later. + DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -1702,7 +1889,7 @@ type Disk struct { // text format. LastDetachTimestamp string `json:"lastDetachTimestamp,omitempty"` - // Licenses: [Output Only] Any applicable publicly visible licenses. + // Licenses: Any applicable publicly visible licenses. Licenses []string `json:"licenses,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -1759,6 +1946,11 @@ type Disk struct { // global/images/family/my-private-family SourceImage string `json:"sourceImage,omitempty"` + // SourceImageEncryptionKey: The customer-supplied encryption key of the + // source image. Required if the source image is protected by a + // customer-supplied encryption key. + SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` + // SourceImageId: [Output Only] The ID value of the image used to create // this disk. This value identifies the exact image that was used to // create this persistent disk. 
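The CustomerEncryptionKey type and the DiskEncryptionKey / SourceImageEncryptionKey fields introduced above are used roughly as follows; this is only a sketch, the key material and resource names are placeholders, and svc is assumed to be a configured *compute.Service. As the comments above note, the same key must be supplied again for any later operation on the disk.

package example

import (
	compute "google.golang.org/api/compute/v1"
)

// createEncryptedDisk creates a persistent disk protected by a
// customer-supplied key. rawKey must be a 256-bit key encoded in
// RFC 4648 base64; the project, zone, and disk names are placeholders.
func createEncryptedDisk(svc *compute.Service, rawKey string) (*compute.Operation, error) {
	disk := &compute.Disk{
		Name:   "encrypted-disk",
		SizeGb: 100,
		DiskEncryptionKey: &compute.CustomerEncryptionKey{
			RawKey: rawKey,
		},
	}
	return svc.Disks.Insert("my-project", "us-central1-f", disk).Do()
}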
For example, if you created the @@ -1776,6 +1968,11 @@ type Disk struct { // - global/snapshots/snapshot SourceSnapshot string `json:"sourceSnapshot,omitempty"` + // SourceSnapshotEncryptionKey: The customer-supplied encryption key of + // the source snapshot. Required if the source snapshot is protected by + // a customer-supplied encryption key. + SourceSnapshotEncryptionKey *CustomerEncryptionKey `json:"sourceSnapshotEncryptionKey,omitempty"` + // SourceSnapshotId: [Output Only] The unique ID of the snapshot used to // create this disk. This value identifies the exact snapshot that was // used to create this persistent disk. For example, if you created the @@ -2119,6 +2316,7 @@ type DiskTypesScopedListWarning struct { // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -2240,6 +2438,7 @@ type DisksScopedListWarning struct { // "CLEANUP_FAILED" // "DEPRECATED_RESOURCE_USED" // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "FIELD_VALUE_OVERRIDEN" // "INJECTED_KERNELS_DEPRECATED" // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" // "NEXT_HOP_CANNOT_IP_FORWARD" @@ -2309,9 +2508,9 @@ func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) { // Firewall: Represents a Firewall resource. type Firewall struct { - // Allowed: The list of rules specified by this firewall. Each rule - // specifies a protocol and port-range tuple that describes a permitted - // connection. + // Allowed: The list of ALLOW rules specified by this firewall. Each + // rule specifies a protocol and port-range tuple that describes a + // permitted connection. Allowed []*FirewallAllowed `json:"allowed,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -2355,23 +2554,25 @@ type Firewall struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SourceRanges: The IP address blocks that this rule applies to, - // expressed in CIDR format. One or both of sourceRanges and sourceTags - // may be set. - // - // If both properties are set, an inbound connection is allowed if the - // range matches the sourceRanges OR the tag of the source matches the - // sourceTags property. The connection does not need to match both - // properties. + // SourceRanges: If source ranges are specified, the firewall will apply + // only to traffic that has source IP address in these ranges. These + // ranges must be expressed in CIDR format. One or both of sourceRanges + // and sourceTags may be set. If both properties are set, the firewall + // will apply to traffic that has source IP address within sourceRanges + // OR the source IP that belongs to a tag listed in the sourceTags + // property. The connection does not need to match both properties for + // the firewall to apply. SourceRanges []string `json:"sourceRanges,omitempty"` - // SourceTags: A list of instance tags which this rule applies to. One - // or both of sourceRanges and sourceTags may be set. - // - // If both properties are set, an inbound connection is allowed if the - // range matches the sourceRanges OR the tag of the source matches the - // sourceTags property. The connection does not need to match both - // properties. + // SourceTags: If source tags are specified, the firewall will apply + // only to traffic with source IP that belongs to a tag listed in source + // tags. 
Source tags cannot be used to control traffic to an instance's + // external IP address. Because tags are associated with an instance, + // not an IP address. One or both of sourceRanges and sourceTags may be + // set. If both properties are set, the firewall will apply to traffic + // that has source IP address within sourceRanges OR the source IP that + // belongs to a tag listed in the sourceTags property. The connection + // does not need to match both properties for the firewall to apply. SourceTags []string `json:"sourceTags,omitempty"` // TargetTags: A list of instance tags indicating sets of instances @@ -2400,16 +2601,16 @@ func (s *Firewall) MarshalJSON() ([]byte, error) { } type FirewallAllowed struct { - // IPProtocol: The IP protocol that is allowed for this rule. The - // protocol type is required when creating a firewall rule. This value - // can either be one of the following well known protocol strings (tcp, - // udp, icmp, esp, ah, sctp), or the IP protocol number. + // IPProtocol: The IP protocol to which this rule applies. The protocol + // type is required when creating a firewall rule. This value can either + // be one of the following well known protocol strings (tcp, udp, icmp, + // esp, ah, sctp), or the IP protocol number. IPProtocol string `json:"IPProtocol,omitempty"` - // Ports: An optional list of ports which are allowed. This field is - // only applicable for UDP or TCP protocol. Each entry must be either an - // integer or a range. If not specified, connections through any port - // are allowed + // Ports: An optional list of ports to which this rule applies. This + // field is only applicable for UDP or TCP protocol. Each entry must be + // either an integer or a range. If not specified, this rule applies to + // connections through any port. // // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. Ports []string `json:"ports,omitempty"` @@ -2476,17 +2677,29 @@ func (s *FirewallList) MarshalJSON() ([]byte, error) { // specifies which pool of target virtual machines to forward a packet // to if it matches the given [IPAddress, IPProtocol, portRange] tuple. type ForwardingRule struct { - // IPAddress: Value of the reserved IP address that this forwarding rule - // is serving on behalf of. For global forwarding rules, the address - // must be a global IP; for regional forwarding rules, the address must - // live in the same region as the forwarding rule. If left empty - // (default value), an ephemeral IP from the same scope (global or - // regional) will be assigned. + // IPAddress: The IP address that this forwarding rule is serving on + // behalf of. + // + // For global forwarding rules, the address must be a global IP; for + // regional forwarding rules, the address must live in the same region + // as the forwarding rule. By default, this field is empty and an + // ephemeral IP from the same scope (global or regional) will be + // assigned. + // + // When the load balancing scheme is INTERNAL, this can only be an RFC + // 1918 IP address belonging to the network/subnetwork configured for + // the forwarding rule. A reserved address cannot be used. If the field + // is empty, the IP address will be automatically allocated from the + // internal IP range of the subnetwork or network configured for this + // forwarding rule. IPAddress string `json:"IPAddress,omitempty"` // IPProtocol: The IP protocol to which this rule applies. Valid options // are TCP, UDP, ESP, AH, SCTP or ICMP. 
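The reworded Firewall documentation above (ALLOW rules plus the sourceRanges / sourceTags OR semantics) maps onto the generated client roughly as in this sketch; the network, tag, and range values are placeholders and svc is assumed to be a configured *compute.Service.

package example

import (
	compute "google.golang.org/api/compute/v1"
)

// allowWebFromInternal creates a firewall whose ALLOW rules admit TCP 80
// and 443 from 10.0.0.0/8 to instances tagged "web". All names and ranges
// are placeholders.
func allowWebFromInternal(svc *compute.Service) (*compute.Operation, error) {
	fw := &compute.Firewall{
		Name:    "allow-web-internal",
		Network: "global/networks/default",
		Allowed: []*compute.FirewallAllowed{{
			IPProtocol: "tcp",
			Ports:      []string{"80", "443"},
		}},
		SourceRanges: []string{"10.0.0.0/8"},
		TargetTags:   []string{"web"},
	}
	return svc.Firewalls.Insert("my-project", fw).Do()
}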
// + // When the load balancing scheme is INTERNAL= 0; i-- { + if fn := post[i]; fn != nil { + fn(resp) + } + } + return resp, err +} diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 858537e00b07..ad059b5d56d3 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -149,12 +149,12 @@ func IsNotModified(err error) bool { // CheckMediaResponse returns an error (of type *Error) if the response // status code is not 2xx. Unlike CheckResponse it does not assume the // body is a JSON error document. +// It is the caller's responsibility to close res.Body. func CheckMediaResponse(res *http.Response) error { if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) - res.Body.Close() return &Error{ Code: res.StatusCode, Body: string(slurp), @@ -278,41 +278,15 @@ func ResolveRelative(basestr, relstr string) string { return us } -// has4860Fix is whether this Go environment contains the fix for -// http://golang.org/issue/4860 -var has4860Fix bool - -// init initializes has4860Fix by checking the behavior of the net/http package. -func init() { - r := http.Request{ - URL: &url.URL{ - Scheme: "http", - Opaque: "//opaque", - }, - } - b := &bytes.Buffer{} - r.Write(b) - has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http")) -} - -// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it -// don't alter any hex-escaped characters in u.Path. -func SetOpaque(u *url.URL) { - u.Opaque = "//" + u.Host + u.Path - if !has4860Fix { - u.Opaque = u.Scheme + ":" + u.Opaque - } -} - // Expand subsitutes any {encoded} strings in the URL passed in using // the map supplied. // // This calls SetOpaque to avoid encoding of the parameters in the URL path. func Expand(u *url.URL, expansions map[string]string) { - expanded, err := uritemplates.Expand(u.Path, expansions) + escaped, unescaped, err := uritemplates.Expand(u.Path, expansions) if err == nil { - u.Path = expanded - SetOpaque(u) + u.Path = unescaped + u.RawPath = escaped } } diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go index 7c103ba1386d..63bf0538301a 100644 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go @@ -34,11 +34,37 @@ func pctEncode(src []byte) []byte { return dst } -func escape(s string, allowReserved bool) string { +// pairWriter is a convenience struct which allows escaped and unescaped +// versions of the template to be written in parallel. +type pairWriter struct { + escaped, unescaped bytes.Buffer +} + +// Write writes the provided string directly without any escaping. +func (w *pairWriter) Write(s string) { + w.escaped.WriteString(s) + w.unescaped.WriteString(s) +} + +// Escape writes the provided string, escaping the string for the +// escaped output. +func (w *pairWriter) Escape(s string, allowReserved bool) { + w.unescaped.WriteString(s) if allowReserved { - return string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) } - return string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) +} + +// Escaped returns the escaped string. 
+func (w *pairWriter) Escaped() string { + return w.escaped.String() +} + +// Unescaped returns the unescaped string. +func (w *pairWriter) Unescaped() string { + return w.unescaped.String() } // A uriTemplate is a parsed representation of a URI template. @@ -170,18 +196,20 @@ func parseTerm(term string) (result templateTerm, err error) { return result, err } -// Expand expands a URI template with a set of values to produce a string. -func (t *uriTemplate) Expand(values map[string]string) string { - var buf bytes.Buffer +// Expand expands a URI template with a set of values to produce the +// resultant URI. Two forms of the result are returned: one with all the +// elements escaped, and one with the elements unescaped. +func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) { + var w pairWriter for _, p := range t.parts { - p.expand(&buf, values) + p.expand(&w, values) } - return buf.String() + return w.Escaped(), w.Unescaped() } -func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) { +func (tp *templatePart) expand(w *pairWriter, values map[string]string) { if len(tp.raw) > 0 { - buf.WriteString(tp.raw) + w.Write(tp.raw) return } var first = true @@ -191,30 +219,30 @@ func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) { continue } if first { - buf.WriteString(tp.first) + w.Write(tp.first) first = false } else { - buf.WriteString(tp.sep) + w.Write(tp.sep) } - tp.expandString(buf, term, value) + tp.expandString(w, term, value) } } -func (tp *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { +func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) { if tp.named { - buf.WriteString(name) + w.Write(name) if empty { - buf.WriteString(tp.ifemp) + w.Write(tp.ifemp) } else { - buf.WriteString("=") + w.Write("=") } } } -func (tp *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { +func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) { if len(s) > t.truncate && t.truncate > 0 { s = s[:t.truncate] } - tp.expandName(buf, t.name, len(s) == 0) - buf.WriteString(escape(s, tp.allowReserved)) + tp.expandName(w, t.name, len(s) == 0) + w.Escape(s, tp.allowReserved) } diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go index eff260a6925f..2e70b81543d0 100644 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go +++ b/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go @@ -4,10 +4,14 @@ package uritemplates -func Expand(path string, values map[string]string) (string, error) { +// Expand parses then expands a URI template with a set of values to produce +// the resultant URI. Two forms of the result are returned: one with all the +// elements escaped, and one with the elements unescaped. 
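The escaped/unescaped pair produced here propagates up to the exported googleapi.Expand, which now fills in both url.URL.Path and url.URL.RawPath instead of relying on the removed SetOpaque helper. A small self-contained sketch; the template and values are arbitrary placeholders.

package main

import (
	"fmt"
	"net/url"

	"google.golang.org/api/googleapi"
)

func main() {
	// A URL whose path still contains {braced} template variables, as the
	// generated clients build them before expansion.
	u := &url.URL{
		Scheme: "https",
		Host:   "www.googleapis.com",
		Path:   "compute/v1/projects/{project}/global/urlMaps/{urlMap}",
	}
	googleapi.Expand(u, map[string]string{
		"project": "my-project",
		"urlMap":  "map with spaces", // a value that forces escaping
	})
	fmt.Println(u.Path)    // unescaped: .../urlMaps/map with spaces
	fmt.Println(u.RawPath) // escaped:   .../urlMaps/map%20with%20spaces
}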
+func Expand(path string, values map[string]string) (escaped, unescaped string, err error) { template, err := parse(path) if err != nil { - return "", err + return "", "", err } - return template.Expand(values), nil + escaped, unescaped = template.Expand(values) + return escaped, unescaped, nil } diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index d2bf75880149..8d9118b54754 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"jQLIOHBVnDZie4rQHGH1WJF-INE/Wi5axMdjgKCnqJxaxfzKML8o57Y\"", + "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/gtcWNCypj7VTDxrk1rvvuniNHZo\"", "discoveryVersion": "v1", "id": "storage:v1", "name": "storage", "version": "v1", - "revision": "20160414", + "revision": "20160802", "title": "Cloud Storage JSON API", "description": "Stores and retrieves potentially large, immutable data objects.", "ownerDomain": "google.com", @@ -150,6 +150,15 @@ "$ref": "ObjectAccessControl" } }, + "encryption": { + "type": "object", + "description": "Encryption configuration used by default for newly inserted objects, when no encryption config is specified.", + "properties": { + "default_kms_key_name": { + "type": "string" + } + } + }, "etag": { "type": "string", "description": "HTTP 1.1 Entity tag for the bucket." @@ -294,15 +303,15 @@ }, "website": { "type": "object", - "description": "The bucket's website configuration.", + "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.", "properties": { "mainPageSuffix": { "type": "string", - "description": "Behaves as the bucket's directory index where missing objects are treated as potential directories." + "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages." }, "notFoundPage": { "type": "string", - "description": "The custom object to return when a requested resource is not found." + "description": "If the requested object path is missing, and any mainPageSuffix object is missing, if applicable, the service will return the named object from this bucket as the content for a 404 Not Found result." } } } @@ -361,13 +370,13 @@ }, "team": { "type": "string", - "description": "The team. Can be owners, editors, or viewers." + "description": "The team." } } }, "role": { "type": "string", - "description": "The access permission for the entity. Can be READER, WRITER, or OWNER.", + "description": "The access permission for the entity.", "annotations": { "required": [ "storage.bucketAccessControls.insert" @@ -553,7 +562,7 @@ }, "cacheControl": { "type": "string", - "description": "Cache-Control directive for the object data." + "description": "Cache-Control directive for the object data. If omitted, and the object is accessible to all anonymous users, the default will be public, max-age=3600." }, "componentCount": { "type": "integer", @@ -574,7 +583,7 @@ }, "contentType": { "type": "string", - "description": "Content-Type of the object data." + "description": "Content-Type of the object data. If contentType is not specified, object downloads will be served as application/octet-stream." 
}, "crc32c": { "type": "string", @@ -612,6 +621,10 @@ "description": "The kind of item this is. For objects, this is always storage#object.", "default": "storage#object" }, + "kmsKeyName": { + "type": "string", + "description": "Cloud KMS Key used to encrypt this object, if the object is encrypted by such a key." + }, "md5Hash": { "type": "string", "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices." @@ -738,13 +751,13 @@ }, "team": { "type": "string", - "description": "The team. Can be owners, editors, or viewers." + "description": "The team." } } }, "role": { "type": "string", - "description": "The access permission for the entity. Can be READER or OWNER." + "description": "The access permission for the entity." }, "selfLink": { "type": "string", @@ -1958,6 +1971,11 @@ "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", "format": "int64", "location": "query" + }, + "kmsKeyName": { + "type": "string", + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query" } }, "parameterOrder": [ @@ -2296,6 +2314,11 @@ "format": "int64", "location": "query" }, + "kmsKeyName": { + "type": "string", + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query" + }, "name": { "type": "string", "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", @@ -2547,6 +2570,11 @@ "required": true, "location": "path" }, + "destinationKmsKeyName": { + "type": "string", + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query" + }, "destinationObject": { "type": "string", "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index fa17b39d5e0b..a74cad29c883 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -7,7 +7,7 @@ // import "google.golang.org/api/storage/v1" // ... // storageService, err := storage.New(oauthHttpClient) -package storage +package storage // import "google.golang.org/api/storage/v1" import ( "bytes" @@ -169,6 +169,10 @@ type Bucket struct { // when no ACL is provided. DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"` + // Encryption: Encryption configuration used by default for newly + // inserted objects, when no encryption config is specified. 
+ Encryption *BucketEncryption `json:"encryption,omitempty"` + // Etag: HTTP 1.1 Entity tag for the bucket. Etag string `json:"etag,omitempty"` @@ -225,7 +229,9 @@ type Bucket struct { // Versioning: The bucket's versioning configuration. Versioning *BucketVersioning `json:"versioning,omitempty"` - // Website: The bucket's website configuration. + // Website: The bucket's website configuration, controlling how the + // service behaves when accessing bucket contents as a web site. See the + // Static Website Examples for more information. Website *BucketWebsite `json:"website,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -282,6 +288,26 @@ func (s *BucketCors) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields) } +// BucketEncryption: Encryption configuration used by default for newly +// inserted objects, when no encryption config is specified. +type BucketEncryption struct { + DefaultKmsKeyName string `json:"default_kms_key_name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DefaultKmsKeyName") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *BucketEncryption) MarshalJSON() ([]byte, error) { + type noMethod BucketEncryption + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + // BucketLifecycle: The bucket's lifecycle configuration. See lifecycle // management for more information. type BucketLifecycle struct { @@ -454,14 +480,20 @@ func (s *BucketVersioning) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields) } -// BucketWebsite: The bucket's website configuration. +// BucketWebsite: The bucket's website configuration, controlling how +// the service behaves when accessing bucket contents as a web site. See +// the Static Website Examples for more information. type BucketWebsite struct { - // MainPageSuffix: Behaves as the bucket's directory index where missing - // objects are treated as potential directories. + // MainPageSuffix: If the requested object path is missing, the service + // will ensure the path has a trailing '/', append this suffix, and + // attempt to retrieve the resulting object. This allows the creation of + // index.html objects to represent directory pages. MainPageSuffix string `json:"mainPageSuffix,omitempty"` - // NotFoundPage: The custom object to return when a requested resource - // is not found. + // NotFoundPage: If the requested object path is missing, and any + // mainPageSuffix object is missing, if applicable, the service will + // return the named object from this bucket as the content for a 404 Not + // Found result. NotFoundPage string `json:"notFoundPage,omitempty"` // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to @@ -523,8 +555,7 @@ type BucketAccessControl struct { // ProjectTeam: The project team associated with the entity, if any. ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"` - // Role: The access permission for the entity. Can be READER, WRITER, or - // OWNER. + // Role: The access permission for the entity. Role string `json:"role,omitempty"` // SelfLink: The link to this access-control entry. 
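The Encryption field and BucketEncryption type added above give a bucket a default Cloud KMS key that the service applies to newly written objects when no per-object key is supplied. A minimal sketch of setting it through this generated client, assuming an already-authorized HTTP client; the project ID, bucket name, and key resource name below are placeholders, not values taken from this change:

package main

import (
	"log"
	"net/http"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	// Assumes an *http.Client already authorized for the Storage scope
	// (for example via golang.org/x/oauth2/google); http.DefaultClient
	// is only a stand-in here.
	svc, err := storage.New(http.DefaultClient)
	if err != nil {
		log.Fatal(err)
	}

	bucket := &storage.Bucket{
		Name: "example-bucket", // placeholder
		// New in this revision: bucket-level default encryption key.
		Encryption: &storage.BucketEncryption{
			DefaultKmsKeyName: "projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key",
		},
	}
	if _, err := svc.Buckets.Insert("my-project", bucket).Do(); err != nil {
		log.Fatal(err)
	}
}

The kmsKeyName request parameters added further down in this diff expose the same idea per object for the insert, compose, and rewrite calls.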
@@ -555,7 +586,7 @@ type BucketAccessControlProjectTeam struct { // ProjectNumber: The project number. ProjectNumber string `json:"projectNumber,omitempty"` - // Team: The team. Can be owners, editors, or viewers. + // Team: The team. Team string `json:"team,omitempty"` // ForceSendFields is a list of field names (e.g. "ProjectNumber") to @@ -778,7 +809,9 @@ type Object struct { // Bucket: The name of the bucket containing this object. Bucket string `json:"bucket,omitempty"` - // CacheControl: Cache-Control directive for the object data. + // CacheControl: Cache-Control directive for the object data. If + // omitted, and the object is accessible to all anonymous users, the + // default will be public, max-age=3600. CacheControl string `json:"cacheControl,omitempty"` // ComponentCount: Number of underlying components that make up this @@ -794,7 +827,9 @@ type Object struct { // ContentLanguage: Content-Language of the object data. ContentLanguage string `json:"contentLanguage,omitempty"` - // ContentType: Content-Type of the object data. + // ContentType: Content-Type of the object data. If contentType is not + // specified, object downloads will be served as + // application/octet-stream. ContentType string `json:"contentType,omitempty"` // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; @@ -821,6 +856,10 @@ type Object struct { // storage#object. Kind string `json:"kind,omitempty"` + // KmsKeyName: Cloud KMS Key used to encrypt this object, if the object + // is encrypted by such a key. + KmsKeyName string `json:"kmsKeyName,omitempty"` + // Md5Hash: MD5 hash of the data; encoded using base64. For more // information about using the MD5 hash, see Hashes and ETags: Best // Practices. @@ -984,7 +1023,7 @@ type ObjectAccessControl struct { // ProjectTeam: The project team associated with the entity, if any. ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` - // Role: The access permission for the entity. Can be READER or OWNER. + // Role: The access permission for the entity. Role string `json:"role,omitempty"` // SelfLink: The link to this access-control entry. @@ -1015,7 +1054,7 @@ type ObjectAccessControlProjectTeam struct { // ProjectNumber: The project number. ProjectNumber string `json:"projectNumber,omitempty"` - // Team: The team. Can be owners, editors, or viewers. + // Team: The team. Team string `json:"team,omitempty"` // ForceSendFields is a list of field names (e.g. "ProjectNumber") to @@ -1181,20 +1220,19 @@ func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAcc } func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.delete" call. 
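The doRequest rewrites in these hunks keep calling googleapi.Expand, which, per the googleapi.go change earlier in this diff, now fills in both URL.Path (unescaped) and URL.RawPath (escaped) instead of relying on the removed SetOpaque helper. A small illustration of the effect, with a made-up object name containing characters that need escaping:

package main

import (
	"fmt"
	"net/url"

	"google.golang.org/api/googleapi"
)

func main() {
	// Template URL in the shape the generated code builds before expansion.
	u, err := url.Parse("https://www.googleapis.com/storage/v1/b/{bucket}/o/{object}")
	if err != nil {
		panic(err)
	}

	googleapi.Expand(u, map[string]string{
		"bucket": "my-bucket",             // placeholder
		"object": "folder/with space.txt", // placeholder
	})

	// Path keeps the readable, unescaped form; RawPath carries the escaped
	// form used on the wire, so '/' and ' ' inside the object name survive
	// as %2F and %20 rather than being reinterpreted as path structure.
	fmt.Println(u.Path)    // .../o/folder/with space.txt
	fmt.Println(u.RawPath) // .../o/folder%2Fwith%20space.txt
}

Keeping both forms on the URL is what lets object names containing slashes or spaces round-trip through the generated storage calls without double-encoding.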
@@ -1287,23 +1325,22 @@ func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccess } func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.get" call. @@ -1338,7 +1375,8 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1411,25 +1449,23 @@ func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAcc } func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.insert" call. @@ -1464,7 +1500,8 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1542,22 +1579,21 @@ func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAcces } func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.list" call. @@ -1592,7 +1628,8 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1661,26 +1698,24 @@ func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAcce } func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.patch" call. @@ -1715,7 +1750,8 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1793,26 +1829,24 @@ func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAcc } func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.update" call. 
@@ -1847,7 +1881,8 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1937,19 +1972,18 @@ func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { } func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.delete" call. @@ -2074,22 +2108,21 @@ func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { } func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.get" call. @@ -2124,7 +2157,8 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -2269,23 +2303,20 @@ func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { } func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.insert" call. 
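Every doRequest in this file now ends with gensupport.SendRequest(c.ctx_, c.s.client, req) instead of branching on c.ctx_ inline. The vendored helper itself is outside this excerpt (a fragment that appears to be its hook-unwinding tail survives at the top of this section), so the following is only a minimal stand-in with the same signature, reproducing the branch each call site used to carry; it is an assumption about shape, not the vendored implementation:

package gensupport

import (
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

// SendRequest issues req on client, honoring ctx when one was attached to
// the call. The vendored version appears to also run registered hooks
// around the request (see the fragment at the start of this section).
func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if ctx != nil {
		return ctxhttp.Do(ctx, client, req)
	}
	return client.Do(req)
}

Centralizing that branch in one helper keeps each generated method's doRequest limited to building headers, the URL, and the body.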
@@ -2320,7 +2351,8 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -2484,20 +2516,18 @@ func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { } func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.list" call. @@ -2532,7 +2562,8 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -2720,25 +2751,23 @@ func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { } func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.patch" call. @@ -2773,7 +2802,8 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -2975,25 +3005,23 @@ func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { } func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.update" call. @@ -3028,7 +3056,8 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -3160,23 +3189,20 @@ func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { } func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) - googleapi.SetOpaque(req.URL) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.channels.stop" call. @@ -3247,20 +3273,19 @@ func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *De } func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.delete" call. @@ -3353,23 +3378,22 @@ func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *Defau } func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.get" call. @@ -3404,7 +3428,8 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -3478,25 +3503,23 @@ func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *De } func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.insert" call. @@ -3531,7 +3554,8 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -3626,22 +3650,21 @@ func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *Defa } func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.list" call. 
@@ -3676,7 +3699,8 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -3757,26 +3781,24 @@ func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *Def } func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.patch" call. @@ -3811,7 +3833,8 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -3889,26 +3912,24 @@ func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *De } func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.update" call. 
@@ -3943,7 +3964,8 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -4030,21 +4052,20 @@ func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAcc } func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.delete" call. @@ -4160,24 +4181,23 @@ func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccess } func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.get" call. @@ -4212,7 +4232,8 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -4308,26 +4329,24 @@ func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAcc } func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.insert" call. @@ -4362,7 +4381,8 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -4463,23 +4483,22 @@ func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAcces } func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.list" call. @@ -4514,7 +4533,8 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -4606,27 +4626,25 @@ func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAcce } func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.patch" call. 
@@ -4661,7 +4679,8 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -4762,27 +4781,25 @@ func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAcc } func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.update" call. @@ -4817,7 +4834,8 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -4929,6 +4947,16 @@ func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) return c } +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of +// the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. +func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { + c.urlParams_.Set("kmsKeyName", kmsKeyName) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. @@ -4946,26 +4974,24 @@ func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { } func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal @@ -5016,7 +5042,8 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -5073,6 +5100,11 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "format": "int64", // "location": "query", // "type": "string" + // }, + // "kmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" // } // }, // "path": "b/{destinationBucket}/o/{destinationObject}/compose", @@ -5246,28 +5278,26 @@ func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { } func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "sourceBucket": c.sourceBucket, "sourceObject": c.sourceObject, "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal @@ -5318,7 +5348,8 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -5542,20 +5573,19 @@ func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { } func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.delete" call. @@ -5730,23 +5760,22 @@ func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { } func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal @@ -5797,7 +5826,8 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -5950,6 +5980,16 @@ func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in return c } +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of +// the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. +func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { + c.urlParams_.Set("kmsKeyName", kmsKeyName) + return c +} + // Name sets the optional parameter "name": Name of the object. Required // when the object metadata is not otherwise provided. Overrides the // object metadata's name value, if any. For information about how to @@ -6000,6 +6040,9 @@ func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { // supplied. // At most one of Media and ResumableMedia may be set. func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { + if ct := c.object.ContentType; ct != "" { + options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...) 
+ } opts := googleapi.ProcessMediaOptions(options) chunkSize := opts.ChunkSize if !opts.ForceEmptyContentType { @@ -6056,12 +6099,14 @@ func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { } func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") if c.media_ != nil || c.mediaBuffer_ != nil { @@ -6072,26 +6117,26 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { } c.urlParams_.Set("uploadType", protocol) } - urls += "?" + c.urlParams_.Encode() + if body == nil { + body = new(bytes.Buffer) + reqHeaders.Set("Content-Type", "application/json") + } if c.media_ != nil { - var combined io.ReadCloser - combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_) + combined, ctype := gensupport.CombineBodyMedia(body, "application/json", c.media_, c.mediaType_) defer combined.Close() + reqHeaders.Set("Content-Type", ctype) body = combined } + if c.mediaBuffer_ != nil && c.mediaType_ != "" { + reqHeaders.Set("X-Upload-Content-Type", c.mediaType_) + } + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - if c.mediaBuffer_ != nil && c.mediaType_ != "" { - req.Header.Set("X-Upload-Content-Type", c.mediaType_) - } - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.insert" call. @@ -6153,7 +6198,8 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -6215,6 +6261,11 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, + // "kmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, // "name": { // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "query", @@ -6372,22 +6423,21 @@ func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { } func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.list" call. @@ -6422,7 +6472,8 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -6628,26 +6679,24 @@ func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { } func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.patch" call. @@ -6682,7 +6731,8 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -6812,6 +6862,17 @@ func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, desti return c } +// DestinationKmsKeyName sets the optional parameter +// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the +// form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. 
+func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) + return c +} + // DestinationPredefinedAcl sets the optional parameter // "destinationPredefinedAcl": Apply a predefined set of access controls // to the destination object. @@ -6965,28 +7026,26 @@ func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { } func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "sourceBucket": c.sourceBucket, "sourceObject": c.sourceObject, "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.rewrite" call. @@ -7021,7 +7080,8 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -7042,6 +7102,11 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, // "required": true, // "type": "string" // }, + // "destinationKmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, // "destinationObject": { // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", // "location": "path", @@ -7285,26 +7350,24 @@ func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { } func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal @@ -7355,7 +7418,8 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -7551,25 +7615,23 @@ func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall } func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.watchAll" call. @@ -7604,7 +7666,8 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil diff --git a/vendor/google.golang.org/cloud/.travis.yml b/vendor/google.golang.org/cloud/.travis.yml deleted file mode 100644 index 197dedc46716..000000000000 --- a/vendor/google.golang.org/cloud/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -sudo: false -language: go -go: -- 1.5 -- 1.6 -install: -- go get -v google.golang.org/cloud/... -script: -- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d -- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" - go test -v google.golang.org/cloud/... diff --git a/vendor/google.golang.org/cloud/AUTHORS b/vendor/google.golang.org/cloud/AUTHORS deleted file mode 100644 index f92e5cff9c68..000000000000 --- a/vendor/google.golang.org/cloud/AUTHORS +++ /dev/null @@ -1,14 +0,0 @@ -# This is the official list of cloud authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as: -# Name or Organization -# The email address is not required for organizations. - -Google Inc. -Ingo Oeser -Palm Stone Games, Inc. 
-PaweÅ‚ Knap -Péter Szilágyi -Tyler Treat diff --git a/vendor/google.golang.org/cloud/CONTRIBUTING.md b/vendor/google.golang.org/cloud/CONTRIBUTING.md deleted file mode 100644 index adb9ec1964db..000000000000 --- a/vendor/google.golang.org/cloud/CONTRIBUTING.md +++ /dev/null @@ -1,115 +0,0 @@ -# Contributing - -1. Sign one of the contributor license agreements below. -1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. -1. Get the cloud package by running `go get -d google.golang.org/cloud`. - 1. If you have already checked out the source, make sure that the remote git - origin is https://code.googlesource.com/gocloud: - - git remote set-url origin https://code.googlesource.com/gocloud -1. Make changes and create a change by running `git codereview change `, -provide a command message, and use `git codereview mail` to create a Gerrit CL. -1. Keep amending to the change and mail as your recieve feedback. - -## Integration Tests - -In addition to the unit tests, you may run the integration test suite. - -To run the integrations tests, creating and configuration of a project in the -Google Developers Console is required. Once you create a project, set the -following environment variables to be able to run the against the actual APIs. - -- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) -- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. - -Create a storage bucket with the same name as the project ID set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**. -The storage integration test will create and delete some objects in this bucket. - -Install the [gcloud command-line tool][gcloudcli] to your machine and use it -to create the indexes used in the datastore integration tests with indexes -found in `datastore/testdata/index.yaml`: - -From the project's root directory: - -``` sh -# Install the app component -$ gcloud components update app - -# Set the default project in your env -$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID - -# Authenticate the gcloud tool with your account -$ gcloud auth login - -# Create the indexes -$ gcloud preview datastore create-indexes datastore/testdata/index.yaml - -``` - -Once you've set the environment variables, you can run the integration tests by -running: - -``` sh -$ go test -v google.golang.org/cloud/... -``` - -## Contributor License Agreements - -Before we can accept your pull requests you'll need to sign a Contributor -License Agreement (CLA): - -- **If you are an individual writing original source code** and **you own the -- intellectual property**, then you'll need to sign an [individual CLA][indvcla]. -- **If you work for a company that wants to allow you to contribute your work**, -then you'll need to sign a [corporate CLA][corpcla]. - -You can sign these electronically (just scroll to the bottom). After that, -we'll be able to accept your pull requests. - -## Contributor Code of Conduct - -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. 
- -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. - -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) - -[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ -[indvcla]: https://developers.google.com/open-source/cla/individual -[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/google.golang.org/cloud/CONTRIBUTORS b/vendor/google.golang.org/cloud/CONTRIBUTORS deleted file mode 100644 index 27db7918c697..000000000000 --- a/vendor/google.golang.org/cloud/CONTRIBUTORS +++ /dev/null @@ -1,29 +0,0 @@ -# People who have agreed to one of the CLAs and can contribute patches. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# https://developers.google.com/open-source/cla/individual -# https://developers.google.com/open-source/cla/corporate -# -# Names should be added to this file as: -# Name - -# Keep the list alphabetically sorted. - -Andrew Gerrand -Brad Fitzpatrick -Burcu Dogan -Dave Day -David Sansome -David Symonds -Glenn Lewis -Ingo Oeser -Johan Euphrosine -Luna Duclos -Michael McGreevy -Omar Jarjur -PaweÅ‚ Knap -Péter Szilágyi -Toby Burress -Tyler Treat diff --git a/vendor/google.golang.org/cloud/README.md b/vendor/google.golang.org/cloud/README.md deleted file mode 100644 index a852d68d24d1..000000000000 --- a/vendor/google.golang.org/cloud/README.md +++ /dev/null @@ -1,186 +0,0 @@ -# Google Cloud for Go - -[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang) -[![GoDoc](https://godoc.org/google.golang.org/cloud?status.svg)](https://godoc.org/google.golang.org/cloud) - -``` go -import "google.golang.org/cloud" -``` - -**NOTE:** These packages are experimental, and may occasionally make -backwards-incompatible changes. 
- -**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). - -Go packages for Google Cloud Platform services. Supported APIs are: - -Google API | Status | Package --------------------------------|--------------|----------------------------------------------------------- -[Datastore][cloud-datastore] | experimental | [`google.golang.org/cloud/datastore`][cloud-datastore-ref] -[Cloud Storage][cloud-storage] | experimental | [`google.golang.org/cloud/storage`][cloud-storage-ref] -[Pub/Sub][cloud-pubsub] | experimental | [`google.golang.org/cloud/pubsub`][cloud-pubsub-ref] -[BigTable][cloud-bigtable] | stable | [`google.golang.org/cloud/bigtable`][cloud-bigtable-ref] -[BigQuery][cloud-bigquery] | experimental | [`google.golang.org/cloud/bigquery`][cloud-bigquery-ref] -[Logging][cloud-logging] | experimental | [`google.golang.org/cloud/logging`][cloud-logging-ref] - -> **Experimental status**: the API is still being actively developed. As a -> result, it might change in backward-incompatible ways and is not recommended -> for production use. -> -> **Beta status**: the API is largely complete, but still has outstanding -> features and bugs to be addressed. There may be minor backwards-incompatible -> changes where necessary. -> -> **Stable status**: the API is mature and ready for production use. We will -> continue addressing bugs and feature requests. - -Documentation and examples are available at -https://godoc.org/google.golang.org/cloud - -## Authorization - -By default, each API will use [Google Application Default Credentials][default-creds] -for authorization credentials used in calling the API endpoints. This will allow your -application to run in many environments without requiring explicit configuration. - -Manually-configured authorization can be achieved using the -[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to -create an `oauth2.TokenSource`. This token source can be passed to the `NewClient` -function for the relevant API using a -[`cloud.WithTokenSource`][https://godoc.org/google.golang.org/cloud#WithTokenSource] -option. - -## Google Cloud Datastore - -[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a fully- -managed, schemaless database for storing non-relational data. Cloud Datastore -automatically scales with your users and supports ACID transactions, high availability -of reads and writes, strong consistency for reads and ancestor queries, and eventual -consistency for all other queries. - -Follow the [activation instructions][cloud-datastore-activation] to use the Google -Cloud Datastore API with your project. 
- -https://godoc.org/google.golang.org/cloud/datastore - -First create a `datastore.Client` to use throughout your application: - -```go -client, err := datastore.NewClient(ctx, "my-project-id") -if err != nil { - log.Fatalln(err) -} -``` - -Then use that client to interact with the API: - -```go -type Post struct { - Title string - Body string `datastore:",noindex"` - PublishedAt time.Time -} -keys := []*datastore.Key{ - datastore.NewKey(ctx, "Post", "post1", 0, nil), - datastore.NewKey(ctx, "Post", "post2", 0, nil), -} -posts := []*Post{ - {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, - {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, -} -if _, err := client.PutMulti(ctx, keys, posts); err != nil { - log.Fatal(err) -} -``` - -## Google Cloud Storage - -[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store -data on Google infrastructure with very high reliability, performance and availability, -and can be used to distribute large data objects to users via direct download. - -https://godoc.org/google.golang.org/cloud/storage - -First create a `storage.Client` to use throughout your application: - -```go -client, err := storage.NewClient(ctx) -if err != nil { - log.Fatal(err) -} -``` - -```go -// Read the object1 from bucket. -rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) -if err != nil { - log.Fatal(err) -} -defer rc.Close() -body, err := ioutil.ReadAll(rc) -if err != nil { - log.Fatal(err) -} -``` - -## Google Cloud Pub/Sub - -[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect -your services with reliable, many-to-many, asynchronous messaging hosted on Google's -infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation -for building your own robust, global services. - -https://godoc.org/google.golang.org/cloud/pubsub - - -```go -// Publish "hello world" on topic1. -msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{ - Data: []byte("hello world"), -}) -if err != nil { - log.Println(err) -} -// Pull messages via subscription1. -msgs, err := pubsub.Pull(ctx, "subscription1", 1) -if err != nil { - log.Println(err) -} -``` - -## Contributing - -Contributions are welcome. Please, see the -[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md) -document for details. We're using Gerrit for our code reviews. Please don't open pull -requests against this repo, new pull requests will be automatically closed. - -Please note that this project is released with a Contributor Code of Conduct. -By participating in this project you agree to abide by its terms. -See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md#contributor-code-of-conduct) -for more information. 
- -[cloud-datastore]: https://cloud.google.com/datastore/ -[cloud-datastore-ref]: https://godoc.org/google.golang.org/cloud/datastore -[cloud-datastore-docs]: https://cloud.google.com/datastore/docs -[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate - -[cloud-pubsub]: https://cloud.google.com/pubsub/ -[cloud-pubsub-ref]: https://godoc.org/google.golang.org/cloud/pubsub -[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs - -[cloud-storage]: https://cloud.google.com/storage/ -[cloud-storage-ref]: https://godoc.org/google.golang.org/cloud/storage -[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview -[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets - -[cloud-bigtable]: https://cloud.google.com/bigtable/ -[cloud-bigtable-ref]: https://godoc.org/google.golang.org/cloud/bigtable - -[cloud-bigquery]: https://cloud.google.com/bigquery/ -[cloud-bigquery-ref]: https://godoc.org/google.golang.org/cloud/bigquery - -[cloud-logging]: https://cloud.google.com/logging/ -[cloud-logging-ref]: https://godoc.org/google.golang.org/cloud/logging - -[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials diff --git a/vendor/google.golang.org/cloud/cloud.go b/vendor/google.golang.org/cloud/cloud.go deleted file mode 100644 index 34ea4494bde8..000000000000 --- a/vendor/google.golang.org/cloud/cloud.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package cloud contains Google Cloud Platform APIs related types -// and common functions. -package cloud - -import ( - "net/http" - - "golang.org/x/net/context" - "google.golang.org/cloud/internal" -) - -// NewContext returns a new context that uses the provided http.Client. -// Provided http.Client is responsible to authorize and authenticate -// the requests made to the Google Cloud APIs. -// It mutates the client's original Transport to append the cloud -// package's user-agent to the outgoing requests. -// You can obtain the project ID from the Google Developers Console, -// https://console.developers.google.com. -func NewContext(projID string, c *http.Client) context.Context { - if c == nil { - panic("invalid nil *http.Client passed to NewContext") - } - return WithContext(context.Background(), projID, c) -} - -// WithContext returns a new context in a similar way NewContext does, -// but initiates the new context with the specified parent. -func WithContext(parent context.Context, projID string, c *http.Client) context.Context { - // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. - // Do User-Agent some other way. 
- if c == nil { - panic("invalid nil *http.Client passed to WithContext") - } - if _, ok := c.Transport.(*internal.Transport); !ok { - base := c.Transport - if base == nil { - base = http.DefaultTransport - } - c.Transport = &internal.Transport{Base: base} - } - return internal.WithContext(parent, projID, c) -} diff --git a/vendor/google.golang.org/cloud/internal/cloud.go b/vendor/google.golang.org/cloud/internal/cloud.go deleted file mode 100644 index 59428803dd9c..000000000000 --- a/vendor/google.golang.org/cloud/internal/cloud.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package internal provides support for the cloud packages. -// -// Users should not import this package directly. -package internal - -import ( - "fmt" - "net/http" - "sync" - - "golang.org/x/net/context" -) - -type contextKey struct{} - -func WithContext(parent context.Context, projID string, c *http.Client) context.Context { - if c == nil { - panic("nil *http.Client passed to WithContext") - } - if projID == "" { - panic("empty project ID passed to WithContext") - } - return context.WithValue(parent, contextKey{}, &cloudContext{ - ProjectID: projID, - HTTPClient: c, - }) -} - -const userAgent = "gcloud-golang/0.1" - -type cloudContext struct { - ProjectID string - HTTPClient *http.Client - - mu sync.Mutex // guards svc - svc map[string]interface{} // e.g. "storage" => *rawStorage.Service -} - -// Service returns the result of the fill function if it's never been -// called before for the given name (which is assumed to be an API -// service name, like "datastore"). If it has already been cached, the fill -// func is not run. -// It's safe for concurrent use by multiple goroutines. -func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} { - return cc(ctx).service(name, fill) -} - -func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} { - c.mu.Lock() - defer c.mu.Unlock() - - if c.svc == nil { - c.svc = make(map[string]interface{}) - } else if v, ok := c.svc[name]; ok { - return v - } - v := fill(c.HTTPClient) - c.svc[name] = v - return v -} - -// Transport is an http.RoundTripper that appends -// Google Cloud client's user-agent to the original -// request's user-agent header. -type Transport struct { - // Base is the actual http.RoundTripper - // requests will use. It must not be nil. - Base http.RoundTripper -} - -// RoundTrip appends a user-agent to the existing user-agent -// header and delegates the request to the base http.RoundTripper. -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - req = cloneRequest(req) - ua := req.Header.Get("User-Agent") - if ua == "" { - ua = userAgent - } else { - ua = fmt.Sprintf("%s %s", ua, userAgent) - } - req.Header.Set("User-Agent", ua) - return t.Base.RoundTrip(req) -} - -// cloneRequest returns a clone of the provided *http.Request. 
-// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -func ProjID(ctx context.Context) string { - return cc(ctx).ProjectID -} - -func HTTPClient(ctx context.Context) *http.Client { - return cc(ctx).HTTPClient -} - -// cc returns the internal *cloudContext (cc) state for a context.Context. -// It panics if the user did it wrong. -func cc(ctx context.Context) *cloudContext { - if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok { - return c - } - panic("invalid context.Context type; it should be created with cloud.NewContext") -} diff --git a/vendor/google.golang.org/cloud/internal/opts/option.go b/vendor/google.golang.org/cloud/internal/opts/option.go deleted file mode 100644 index 844d310447d9..000000000000 --- a/vendor/google.golang.org/cloud/internal/opts/option.go +++ /dev/null @@ -1,25 +0,0 @@ -// Package opts holds the DialOpts struct, configurable by -// cloud.ClientOptions to set up transports for cloud packages. -// -// This is a separate page to prevent cycles between the core -// cloud packages. -package opts - -import ( - "net/http" - - "golang.org/x/oauth2" - "google.golang.org/grpc" -) - -type DialOpt struct { - Endpoint string - Scopes []string - UserAgent string - - TokenSource oauth2.TokenSource - - HTTPClient *http.Client - GRPCClient *grpc.ClientConn - GRPCDialOpts []grpc.DialOption -} diff --git a/vendor/google.golang.org/cloud/key.json.enc b/vendor/google.golang.org/cloud/key.json.enc deleted file mode 100644 index 2f673a84b143..000000000000 Binary files a/vendor/google.golang.org/cloud/key.json.enc and /dev/null differ diff --git a/vendor/google.golang.org/cloud/option.go b/vendor/google.golang.org/cloud/option.go deleted file mode 100644 index 8a443b4eca08..000000000000 --- a/vendor/google.golang.org/cloud/option.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cloud - -import ( - "net/http" - - "golang.org/x/oauth2" - "google.golang.org/cloud/internal/opts" - "google.golang.org/grpc" -) - -// ClientOption is used when construct clients for each cloud service. -type ClientOption interface { - // Resolve configures the given DialOpts for this option. - Resolve(*opts.DialOpt) -} - -// WithTokenSource returns a ClientOption that specifies an OAuth2 token -// source to be used as the basis for authentication. -func WithTokenSource(s oauth2.TokenSource) ClientOption { - return withTokenSource{s} -} - -type withTokenSource struct{ ts oauth2.TokenSource } - -func (w withTokenSource) Resolve(o *opts.DialOpt) { - o.TokenSource = w.ts -} - -// WithEndpoint returns a ClientOption that overrides the default endpoint -// to be used for a service. 
-func WithEndpoint(url string) ClientOption { - return withEndpoint(url) -} - -type withEndpoint string - -func (w withEndpoint) Resolve(o *opts.DialOpt) { - o.Endpoint = string(w) -} - -// WithScopes returns a ClientOption that overrides the default OAuth2 scopes -// to be used for a service. -func WithScopes(scope ...string) ClientOption { - return withScopes(scope) -} - -type withScopes []string - -func (w withScopes) Resolve(o *opts.DialOpt) { - s := make([]string, len(w)) - copy(s, w) - o.Scopes = s -} - -// WithUserAgent returns a ClientOption that sets the User-Agent. -func WithUserAgent(ua string) ClientOption { - return withUA(ua) -} - -type withUA string - -func (w withUA) Resolve(o *opts.DialOpt) { o.UserAgent = string(w) } - -// WithBaseHTTP returns a ClientOption that specifies the HTTP client to -// use as the basis of communications. This option may only be used with -// services that support HTTP as their communication transport. -func WithBaseHTTP(client *http.Client) ClientOption { - return withBaseHTTP{client} -} - -type withBaseHTTP struct{ client *http.Client } - -func (w withBaseHTTP) Resolve(o *opts.DialOpt) { - o.HTTPClient = w.client -} - -// WithBaseGRPC returns a ClientOption that specifies the gRPC client -// connection to use as the basis of communications. This option many only be -// used with services that support gRPC as their communication transport. -func WithBaseGRPC(client *grpc.ClientConn) ClientOption { - return withBaseGRPC{client} -} - -type withBaseGRPC struct{ client *grpc.ClientConn } - -func (w withBaseGRPC) Resolve(o *opts.DialOpt) { - o.GRPCClient = w.client -} - -// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption -// to an underlying gRPC dial. It does not work with WithBaseGRPC. -func WithGRPCDialOption(opt grpc.DialOption) ClientOption { - return withGRPCDialOption{opt} -} - -type withGRPCDialOption struct{ opt grpc.DialOption } - -func (w withGRPCDialOption) Resolve(o *opts.DialOpt) { - o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt) -} diff --git a/vendor/k8s.io/client-go/1.4/discovery/discovery_client.go b/vendor/k8s.io/client-go/1.4/discovery/discovery_client.go index a4c729dd715f..8b18e096fd20 100644 --- a/vendor/k8s.io/client-go/1.4/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/1.4/discovery/discovery_client.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "net/url" + "sort" "strings" "github.com/emicklei/go-restful/swagger" @@ -30,7 +31,6 @@ import ( "k8s.io/client-go/1.4/pkg/api/v1" "k8s.io/client-go/1.4/pkg/runtime" "k8s.io/client-go/1.4/pkg/runtime/serializer" - utilerrors "k8s.io/client-go/1.4/pkg/util/errors" "k8s.io/client-go/1.4/pkg/version" "k8s.io/client-go/1.4/rest" ) @@ -149,9 +149,8 @@ func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (r // ignore 403 or 404 error to be compatible with an v1.0 server. if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) { return resources, nil - } else { - return nil, err } + return nil, err } return resources, nil } @@ -174,6 +173,29 @@ func (d *DiscoveryClient) ServerResources() (map[string]*unversioned.APIResource return result, nil } +// ErrGroupDiscoveryFailed is returned if one or more API groups fail to load. 
+type ErrGroupDiscoveryFailed struct { + // Groups is a list of the groups that failed to load and the error cause + Groups map[unversioned.GroupVersion]error +} + +// Error implements the error interface +func (e *ErrGroupDiscoveryFailed) Error() string { + var groups []string + for k, v := range e.Groups { + groups = append(groups, fmt.Sprintf("%s: %v", k, v)) + } + sort.Strings(groups) + return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", ")) +} + +// IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover +// a complete list of APIs for the client to use. +func IsGroupDiscoveryFailedError(err error) bool { + _, ok := err.(*ErrGroupDiscoveryFailed) + return err != nil && ok +} + // serverPreferredResources returns the supported resources with the version preferred by the // server. If namespaced is true, only namespaced resources will be returned. func (d *DiscoveryClient) serverPreferredResources(namespaced bool) ([]unversioned.GroupVersionResource, error) { @@ -183,15 +205,18 @@ func (d *DiscoveryClient) serverPreferredResources(namespaced bool) ([]unversion return results, err } - allErrs := []error{} + var failedGroups map[unversioned.GroupVersion]error for _, apiGroup := range serverGroupList.Groups { preferredVersion := apiGroup.PreferredVersion + groupVersion := unversioned.GroupVersion{Group: apiGroup.Name, Version: preferredVersion.Version} apiResourceList, err := d.ServerResourcesForGroupVersion(preferredVersion.GroupVersion) if err != nil { - allErrs = append(allErrs, err) + if failedGroups == nil { + failedGroups = make(map[unversioned.GroupVersion]error) + } + failedGroups[groupVersion] = err continue } - groupVersion := unversioned.GroupVersion{Group: apiGroup.Name, Version: preferredVersion.Version} for _, apiResource := range apiResourceList.APIResources { // ignore the root scoped resources if "namespaced" is true. 
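The hunk above replaces the flat error aggregation in serverPreferredResources with a typed *ErrGroupDiscoveryFailed, so callers can keep whatever groups did resolve. Below is a minimal sketch of how a caller might take advantage of that, assuming only a reachable API server (the Host value is a placeholder) and using the constructor exercised in the accompanying test; it is illustrative, not part of the vendored change.

```go
package main

import (
	"fmt"
	"log"

	"k8s.io/client-go/1.4/discovery"
	"k8s.io/client-go/1.4/rest"
)

func main() {
	// Placeholder host; in practice the rest.Config would come from kubeconfig or in-cluster config.
	cfg := &rest.Config{Host: "https://127.0.0.1:6443"}
	client := discovery.NewDiscoveryClientForConfigOrDie(cfg)

	// ServerPreferredResources now returns the resources it could discover plus a
	// *ErrGroupDiscoveryFailed when only some groups failed, instead of an aggregate error.
	resources, err := client.ServerPreferredResources()
	if discovery.IsGroupDiscoveryFailedError(err) {
		// Partial failure: log the broken groups, keep using the discovered resources.
		for gv, gerr := range err.(*discovery.ErrGroupDiscoveryFailed).Groups {
			log.Printf("skipping group %s: %v", gv, gerr)
		}
	} else if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(resources), "preferred resources discovered")
}
```

As the new test demonstrates, a single failing group version (for example an extensions/v1beta1 endpoint returning 500) no longer hides the resources served by the healthy groups.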
if namespaced && !apiResource.Namespaced { @@ -203,7 +228,10 @@ func (d *DiscoveryClient) serverPreferredResources(namespaced bool) ([]unversion results = append(results, groupVersion.WithResource(apiResource.Name)) } } - return results, utilerrors.NewAggregate(allErrs) + if len(failedGroups) > 0 { + return results, &ErrGroupDiscoveryFailed{Groups: failedGroups} + } + return results, nil } // ServerPreferredResources returns the supported resources with the version preferred by the diff --git a/vendor/k8s.io/client-go/1.4/discovery/client_test.go b/vendor/k8s.io/client-go/1.4/discovery/discovery_client_test.go similarity index 71% rename from vendor/k8s.io/client-go/1.4/discovery/client_test.go rename to vendor/k8s.io/client-go/1.4/discovery/discovery_client_test.go index c898b2e4ee73..3331f2596ac9 100644 --- a/vendor/k8s.io/client-go/1.4/discovery/client_test.go +++ b/vendor/k8s.io/client-go/1.4/discovery/discovery_client_test.go @@ -320,3 +320,137 @@ func TestGetSwaggerSchemaFail(t *testing.T) { t.Errorf("expected an error, got %v", err) } } + +func TestGetServerPreferredResources(t *testing.T) { + stable := unversioned.APIResourceList{ + GroupVersion: "v1", + APIResources: []unversioned.APIResource{ + {Name: "pods", Namespaced: true, Kind: "Pod"}, + {Name: "services", Namespaced: true, Kind: "Service"}, + {Name: "namespaces", Namespaced: false, Kind: "Namespace"}, + }, + } + /*beta := unversioned.APIResourceList{ + GroupVersion: "extensions/v1", + APIResources: []unversioned.APIResource{ + {Name: "deployments", Namespaced: true, Kind: "Deployment"}, + {Name: "ingresses", Namespaced: true, Kind: "Ingress"}, + {Name: "jobs", Namespaced: true, Kind: "Job"}, + }, + }*/ + tests := []struct { + resourcesList *unversioned.APIResourceList + response func(w http.ResponseWriter, req *http.Request) + expectErr func(err error) bool + }{ + { + resourcesList: &stable, + expectErr: IsGroupDiscoveryFailedError, + response: func(w http.ResponseWriter, req *http.Request) { + var list interface{} + switch req.URL.Path { + case "/apis/extensions/v1beta1": + w.WriteHeader(http.StatusInternalServerError) + return + case "/api/v1": + list = &stable + case "/api": + list = &unversioned.APIVersions{ + Versions: []string{ + "v1", + }, + } + case "/apis": + list = &unversioned.APIGroupList{ + Groups: []unversioned.APIGroup{ + { + Versions: []unversioned.GroupVersionForDiscovery{ + {GroupVersion: "extensions/v1beta1"}, + }, + }, + }, + } + default: + t.Logf("unexpected request: %s", req.URL.Path) + w.WriteHeader(http.StatusNotFound) + return + } + output, err := json.Marshal(list) + if err != nil { + t.Errorf("unexpected encoding error: %v", err) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write(output) + }, + }, + { + resourcesList: nil, + expectErr: IsGroupDiscoveryFailedError, + response: func(w http.ResponseWriter, req *http.Request) { + var list interface{} + switch req.URL.Path { + case "/apis/extensions/v1beta1": + w.WriteHeader(http.StatusInternalServerError) + return + case "/api/v1": + w.WriteHeader(http.StatusInternalServerError) + case "/api": + list = &unversioned.APIVersions{ + Versions: []string{ + "v1", + }, + } + case "/apis": + list = &unversioned.APIGroupList{ + Groups: []unversioned.APIGroup{ + { + Versions: []unversioned.GroupVersionForDiscovery{ + {GroupVersion: "extensions/v1beta1"}, + }, + }, + }, + } + default: + t.Logf("unexpected request: %s", req.URL.Path) + w.WriteHeader(http.StatusNotFound) + return + } + output, err := 
json.Marshal(list) + if err != nil { + t.Errorf("unexpected encoding error: %v", err) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write(output) + }, + }, + /*{ + resourcesList: &stable, + },*/ + } + for _, test := range tests { + server := httptest.NewServer(http.HandlerFunc(test.response)) + defer server.Close() + + client := NewDiscoveryClientForConfigOrDie(&rest.Config{Host: server.URL}) + got, err := client.ServerPreferredResources() + if test.expectErr != nil { + if err == nil { + t.Error("unexpected non-error") + } + + continue + } + if err != nil { + t.Errorf("unexpected error: %v", err) + continue + } + if !reflect.DeepEqual(got, test.resourcesList) { + t.Errorf("expected:\n%v\ngot:\n%v\n", test.resourcesList, got) + } + server.Close() + } +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/clientset.go b/vendor/k8s.io/client-go/1.4/kubernetes/clientset.go index e28c02ca0221..7cf01da1fa87 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/clientset.go @@ -19,12 +19,17 @@ package kubernetes import ( "github.com/golang/glog" discovery "k8s.io/client-go/1.4/discovery" + v1alpha1apps "k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1" + v1beta1authentication "k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1" v1beta1authorization "k8s.io/client-go/1.4/kubernetes/typed/authorization/v1beta1" v1autoscaling "k8s.io/client-go/1.4/kubernetes/typed/autoscaling/v1" v1batch "k8s.io/client-go/1.4/kubernetes/typed/batch/v1" + v1alpha1certificates "k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1" v1core "k8s.io/client-go/1.4/kubernetes/typed/core/v1" v1beta1extensions "k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1" v1alpha1policy "k8s.io/client-go/1.4/kubernetes/typed/policy/v1alpha1" + v1alpha1rbac "k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1" + v1beta1storage "k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1" "k8s.io/client-go/1.4/pkg/util/flowcontrol" rest "k8s.io/client-go/1.4/rest" ) @@ -32,11 +37,16 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface Core() v1core.CoreInterface + Apps() v1alpha1apps.AppsInterface + Authentication() v1beta1authentication.AuthenticationInterface Authorization() v1beta1authorization.AuthorizationInterface Autoscaling() v1autoscaling.AutoscalingInterface Batch() v1batch.BatchInterface + Certificates() v1alpha1certificates.CertificatesInterface Extensions() v1beta1extensions.ExtensionsInterface Policy() v1alpha1policy.PolicyInterface + Rbac() v1alpha1rbac.RbacInterface + Storage() v1beta1storage.StorageInterface } // Clientset contains the clients for groups. 
Each group has exactly one @@ -44,11 +54,16 @@ type Interface interface { type Clientset struct { *discovery.DiscoveryClient *v1core.CoreClient + *v1alpha1apps.AppsClient + *v1beta1authentication.AuthenticationClient *v1beta1authorization.AuthorizationClient *v1autoscaling.AutoscalingClient *v1batch.BatchClient + *v1alpha1certificates.CertificatesClient *v1beta1extensions.ExtensionsClient *v1alpha1policy.PolicyClient + *v1alpha1rbac.RbacClient + *v1beta1storage.StorageClient } // Core retrieves the CoreClient @@ -59,6 +74,22 @@ func (c *Clientset) Core() v1core.CoreInterface { return c.CoreClient } +// Apps retrieves the AppsClient +func (c *Clientset) Apps() v1alpha1apps.AppsInterface { + if c == nil { + return nil + } + return c.AppsClient +} + +// Authentication retrieves the AuthenticationClient +func (c *Clientset) Authentication() v1beta1authentication.AuthenticationInterface { + if c == nil { + return nil + } + return c.AuthenticationClient +} + // Authorization retrieves the AuthorizationClient func (c *Clientset) Authorization() v1beta1authorization.AuthorizationInterface { if c == nil { @@ -83,6 +114,14 @@ func (c *Clientset) Batch() v1batch.BatchInterface { return c.BatchClient } +// Certificates retrieves the CertificatesClient +func (c *Clientset) Certificates() v1alpha1certificates.CertificatesInterface { + if c == nil { + return nil + } + return c.CertificatesClient +} + // Extensions retrieves the ExtensionsClient func (c *Clientset) Extensions() v1beta1extensions.ExtensionsInterface { if c == nil { @@ -99,6 +138,22 @@ func (c *Clientset) Policy() v1alpha1policy.PolicyInterface { return c.PolicyClient } +// Rbac retrieves the RbacClient +func (c *Clientset) Rbac() v1alpha1rbac.RbacInterface { + if c == nil { + return nil + } + return c.RbacClient +} + +// Storage retrieves the StorageClient +func (c *Clientset) Storage() v1beta1storage.StorageInterface { + if c == nil { + return nil + } + return c.StorageClient +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.DiscoveryClient @@ -116,6 +171,14 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + clientset.AppsClient, err = v1alpha1apps.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + clientset.AuthenticationClient, err = v1beta1authentication.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } clientset.AuthorizationClient, err = v1beta1authorization.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -128,6 +191,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + clientset.CertificatesClient, err = v1alpha1certificates.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } clientset.ExtensionsClient, err = v1beta1extensions.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -136,6 +203,14 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + clientset.RbacClient, err = v1alpha1rbac.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + clientset.StorageClient, err = v1beta1storage.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } clientset.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -150,11 +225,16 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { func NewForConfigOrDie(c *rest.Config) *Clientset { var clientset Clientset 
clientset.CoreClient = v1core.NewForConfigOrDie(c) + clientset.AppsClient = v1alpha1apps.NewForConfigOrDie(c) + clientset.AuthenticationClient = v1beta1authentication.NewForConfigOrDie(c) clientset.AuthorizationClient = v1beta1authorization.NewForConfigOrDie(c) clientset.AutoscalingClient = v1autoscaling.NewForConfigOrDie(c) clientset.BatchClient = v1batch.NewForConfigOrDie(c) + clientset.CertificatesClient = v1alpha1certificates.NewForConfigOrDie(c) clientset.ExtensionsClient = v1beta1extensions.NewForConfigOrDie(c) clientset.PolicyClient = v1alpha1policy.NewForConfigOrDie(c) + clientset.RbacClient = v1alpha1rbac.NewForConfigOrDie(c) + clientset.StorageClient = v1beta1storage.NewForConfigOrDie(c) clientset.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &clientset @@ -164,11 +244,16 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c *rest.RESTClient) *Clientset { var clientset Clientset clientset.CoreClient = v1core.New(c) + clientset.AppsClient = v1alpha1apps.New(c) + clientset.AuthenticationClient = v1beta1authentication.New(c) clientset.AuthorizationClient = v1beta1authorization.New(c) clientset.AutoscalingClient = v1autoscaling.New(c) clientset.BatchClient = v1batch.New(c) + clientset.CertificatesClient = v1alpha1certificates.New(c) clientset.ExtensionsClient = v1beta1extensions.New(c) clientset.PolicyClient = v1alpha1policy.New(c) + clientset.RbacClient = v1alpha1rbac.New(c) + clientset.StorageClient = v1beta1storage.New(c) clientset.DiscoveryClient = discovery.NewDiscoveryClient(c) return &clientset diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/doc.go index 43c96d2ef67c..cf2ae4bd20bb 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // This package has the automatically generated clientset. 
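The regenerated Clientset above wires the apps, authentication, certificates, rbac, and storage typed clients in next to the existing groups. A minimal sketch of how the new getters are reached, assuming a pre-built rest.Config (the Host below is a placeholder); the typed interfaces' own methods are defined elsewhere in this update and are omitted here.

```go
package main

import (
	"log"

	"k8s.io/client-go/1.4/kubernetes"
	"k8s.io/client-go/1.4/rest"
)

func main() {
	// Placeholder host; normally built from kubeconfig or in-cluster config.
	cfg := &rest.Config{Host: "https://127.0.0.1:6443"}

	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Newly wired API groups alongside Core, Batch, Extensions, etc.
	_ = cs.Apps()           // apps/v1alpha1 (PetSets)
	_ = cs.Authentication() // authentication/v1beta1
	_ = cs.Certificates()   // certificates/v1alpha1
	_ = cs.Rbac()           // rbac/v1alpha1
	_ = cs.Storage()        // storage/v1beta1
}
```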
package kubernetes diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/fake/clientset_generated.go b/vendor/k8s.io/client-go/1.4/kubernetes/fake/clientset_generated.go index 5a7c005fc96e..82332f297719 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/fake/clientset_generated.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/fake/clientset_generated.go @@ -20,18 +20,28 @@ import ( "k8s.io/client-go/1.4/discovery" fakediscovery "k8s.io/client-go/1.4/discovery/fake" clientset "k8s.io/client-go/1.4/kubernetes" + v1alpha1apps "k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1" + fakev1alpha1apps "k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/fake" + v1beta1authentication "k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1" + fakev1beta1authentication "k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/fake" v1beta1authorization "k8s.io/client-go/1.4/kubernetes/typed/authorization/v1beta1" fakev1beta1authorization "k8s.io/client-go/1.4/kubernetes/typed/authorization/v1beta1/fake" v1autoscaling "k8s.io/client-go/1.4/kubernetes/typed/autoscaling/v1" fakev1autoscaling "k8s.io/client-go/1.4/kubernetes/typed/autoscaling/v1/fake" v1batch "k8s.io/client-go/1.4/kubernetes/typed/batch/v1" fakev1batch "k8s.io/client-go/1.4/kubernetes/typed/batch/v1/fake" + v1alpha1certificates "k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1" + fakev1alpha1certificates "k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/fake" v1core "k8s.io/client-go/1.4/kubernetes/typed/core/v1" fakev1core "k8s.io/client-go/1.4/kubernetes/typed/core/v1/fake" v1beta1extensions "k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1" fakev1beta1extensions "k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/fake" v1alpha1policy "k8s.io/client-go/1.4/kubernetes/typed/policy/v1alpha1" fakev1alpha1policy "k8s.io/client-go/1.4/kubernetes/typed/policy/v1alpha1/fake" + v1alpha1rbac "k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1" + fakev1alpha1rbac "k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake" + v1beta1storage "k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1" + fakev1beta1storage "k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/fake" "k8s.io/client-go/1.4/pkg/api" "k8s.io/client-go/1.4/pkg/apimachinery/registered" "k8s.io/client-go/1.4/pkg/runtime" @@ -77,6 +87,16 @@ func (c *Clientset) Core() v1core.CoreInterface { return &fakev1core.FakeCore{Fake: &c.Fake} } +// Apps retrieves the AppsClient +func (c *Clientset) Apps() v1alpha1apps.AppsInterface { + return &fakev1alpha1apps.FakeApps{Fake: &c.Fake} +} + +// Authentication retrieves the AuthenticationClient +func (c *Clientset) Authentication() v1beta1authentication.AuthenticationInterface { + return &fakev1beta1authentication.FakeAuthentication{Fake: &c.Fake} +} + // Authorization retrieves the AuthorizationClient func (c *Clientset) Authorization() v1beta1authorization.AuthorizationInterface { return &fakev1beta1authorization.FakeAuthorization{Fake: &c.Fake} @@ -92,6 +112,11 @@ func (c *Clientset) Batch() v1batch.BatchInterface { return &fakev1batch.FakeBatch{Fake: &c.Fake} } +// Certificates retrieves the CertificatesClient +func (c *Clientset) Certificates() v1alpha1certificates.CertificatesInterface { + return &fakev1alpha1certificates.FakeCertificates{Fake: &c.Fake} +} + // Extensions retrieves the ExtensionsClient func (c *Clientset) Extensions() v1beta1extensions.ExtensionsInterface { return &fakev1beta1extensions.FakeExtensions{Fake: &c.Fake} @@ -101,3 +126,13 @@ func (c *Clientset) Extensions() 
v1beta1extensions.ExtensionsInterface { func (c *Clientset) Policy() v1alpha1policy.PolicyInterface { return &fakev1alpha1policy.FakePolicy{Fake: &c.Fake} } + +// Rbac retrieves the RbacClient +func (c *Clientset) Rbac() v1alpha1rbac.RbacInterface { + return &fakev1alpha1rbac.FakeRbac{Fake: &c.Fake} +} + +// Storage retrieves the StorageClient +func (c *Clientset) Storage() v1beta1storage.StorageInterface { + return &fakev1beta1storage.FakeStorage{Fake: &c.Fake} +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/fake/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/fake/doc.go index 3617f59cebe4..3b90ebdab047 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/fake/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/fake/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // This package has the automatically generated fake clientset. package fake diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/import_known_versions.go b/vendor/k8s.io/client-go/1.4/kubernetes/import_known_versions.go index a1ae787f4f58..9ce9fffc2b44 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/import_known_versions.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/import_known_versions.go @@ -22,11 +22,16 @@ import ( _ "k8s.io/client-go/1.4/pkg/api/install" "k8s.io/client-go/1.4/pkg/apimachinery/registered" + _ "k8s.io/client-go/1.4/pkg/apis/apps/install" + _ "k8s.io/client-go/1.4/pkg/apis/authentication/install" _ "k8s.io/client-go/1.4/pkg/apis/authorization/install" _ "k8s.io/client-go/1.4/pkg/apis/autoscaling/install" _ "k8s.io/client-go/1.4/pkg/apis/batch/install" + _ "k8s.io/client-go/1.4/pkg/apis/certificates/install" _ "k8s.io/client-go/1.4/pkg/apis/extensions/install" _ "k8s.io/client-go/1.4/pkg/apis/policy/install" + _ "k8s.io/client-go/1.4/pkg/apis/rbac/install" + _ "k8s.io/client-go/1.4/pkg/apis/storage/install" ) func init() { diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/apps_client.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/apps_client.go new file mode 100644 index 000000000000..3e9e2b221e13 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/apps_client.go @@ -0,0 +1,96 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + api "k8s.io/client-go/1.4/pkg/api" + registered "k8s.io/client-go/1.4/pkg/apimachinery/registered" + serializer "k8s.io/client-go/1.4/pkg/runtime/serializer" + rest "k8s.io/client-go/1.4/rest" +) + +type AppsInterface interface { + GetRESTClient() *rest.RESTClient + PetSetsGetter +} + +// AppsClient is used to interact with features provided by the Apps group. +type AppsClient struct { + *rest.RESTClient +} + +func (c *AppsClient) PetSets(namespace string) PetSetInterface { + return newPetSets(c, namespace) +} + +// NewForConfig creates a new AppsClient for the given config. +func NewForConfig(c *rest.Config) (*AppsClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AppsClient{client}, nil +} + +// NewForConfigOrDie creates a new AppsClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AppsClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AppsClient for the given RESTClient. +func New(c *rest.RESTClient) *AppsClient { + return &AppsClient{c} +} + +func setConfigDefaults(config *rest.Config) error { + // if apps group is not registered, return an error + g, err := registered.Group("apps") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AppsClient) GetRESTClient() *rest.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/doc.go new file mode 100644 index 000000000000..e0b389504493 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] + +// This package has the automatically generated typed clients. 
+package v1alpha1 diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/fake/doc.go new file mode 100644 index 000000000000..1e66bad3e91e --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/fake/fake_apps_client.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/fake/fake_apps_client.go new file mode 100644 index 000000000000..a47dcb1386a1 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/fake/fake_apps_client.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1alpha1 "k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1" + rest "k8s.io/client-go/1.4/rest" + testing "k8s.io/client-go/1.4/testing" +) + +type FakeApps struct { + *testing.Fake +} + +func (c *FakeApps) PetSets(namespace string) v1alpha1.PetSetInterface { + return &FakePetSets{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeApps) GetRESTClient() *rest.RESTClient { + return nil +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/fake/fake_petset.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/fake/fake_petset.go new file mode 100644 index 000000000000..978ad4454823 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/fake/fake_petset.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/client-go/1.4/pkg/api" + unversioned "k8s.io/client-go/1.4/pkg/api/unversioned" + v1alpha1 "k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1" + labels "k8s.io/client-go/1.4/pkg/labels" + watch "k8s.io/client-go/1.4/pkg/watch" + testing "k8s.io/client-go/1.4/testing" +) + +// FakePetSets implements PetSetInterface +type FakePetSets struct { + Fake *FakeApps + ns string +} + +var petsetsResource = unversioned.GroupVersionResource{Group: "apps", Version: "v1alpha1", Resource: "petsets"} + +func (c *FakePetSets) Create(petSet *v1alpha1.PetSet) (result *v1alpha1.PetSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(petsetsResource, c.ns, petSet), &v1alpha1.PetSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PetSet), err +} + +func (c *FakePetSets) Update(petSet *v1alpha1.PetSet) (result *v1alpha1.PetSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(petsetsResource, c.ns, petSet), &v1alpha1.PetSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PetSet), err +} + +func (c *FakePetSets) UpdateStatus(petSet *v1alpha1.PetSet) (*v1alpha1.PetSet, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(petsetsResource, "status", c.ns, petSet), &v1alpha1.PetSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PetSet), err +} + +func (c *FakePetSets) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(petsetsResource, c.ns, name), &v1alpha1.PetSet{}) + + return err +} + +func (c *FakePetSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := testing.NewDeleteCollectionAction(petsetsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.PetSetList{}) + return err +} + +func (c *FakePetSets) Get(name string) (result *v1alpha1.PetSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(petsetsResource, c.ns, name), &v1alpha1.PetSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PetSet), err +} + +func (c *FakePetSets) List(opts api.ListOptions) (result *v1alpha1.PetSetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(petsetsResource, c.ns, opts), &v1alpha1.PetSetList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.PetSetList{} + for _, item := range obj.(*v1alpha1.PetSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested petSets. +func (c *FakePetSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(petsetsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched petSet. +func (c *FakePetSets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.PetSet, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(petsetsResource, c.ns, name, data, subresources...), &v1alpha1.PetSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.PetSet), err +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/generated_expansion.go new file mode 100644 index 000000000000..439a241b040c --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +type PetSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/petset.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/petset.go new file mode 100644 index 000000000000..836adee1d80f --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/apps/v1alpha1/petset.go @@ -0,0 +1,165 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + api "k8s.io/client-go/1.4/pkg/api" + v1alpha1 "k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1" + watch "k8s.io/client-go/1.4/pkg/watch" +) + +// PetSetsGetter has a method to return a PetSetInterface. +// A group's client should implement this interface. +type PetSetsGetter interface { + PetSets(namespace string) PetSetInterface +} + +// PetSetInterface has methods to work with PetSet resources. +type PetSetInterface interface { + Create(*v1alpha1.PetSet) (*v1alpha1.PetSet, error) + Update(*v1alpha1.PetSet) (*v1alpha1.PetSet, error) + UpdateStatus(*v1alpha1.PetSet) (*v1alpha1.PetSet, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1alpha1.PetSet, error) + List(opts api.ListOptions) (*v1alpha1.PetSetList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.PetSet, err error) + PetSetExpansion +} + +// petSets implements PetSetInterface +type petSets struct { + client *AppsClient + ns string +} + +// newPetSets returns a PetSets +func newPetSets(c *AppsClient, namespace string) *petSets { + return &petSets{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a petSet and creates it. Returns the server's representation of the petSet, and an error, if there is any. 
+func (c *petSets) Create(petSet *v1alpha1.PetSet) (result *v1alpha1.PetSet, err error) { + result = &v1alpha1.PetSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("petsets"). + Body(petSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a petSet and updates it. Returns the server's representation of the petSet, and an error, if there is any. +func (c *petSets) Update(petSet *v1alpha1.PetSet) (result *v1alpha1.PetSet, err error) { + result = &v1alpha1.PetSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("petsets"). + Name(petSet.Name). + Body(petSet). + Do(). + Into(result) + return +} + +func (c *petSets) UpdateStatus(petSet *v1alpha1.PetSet) (result *v1alpha1.PetSet, err error) { + result = &v1alpha1.PetSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("petsets"). + Name(petSet.Name). + SubResource("status"). + Body(petSet). + Do(). + Into(result) + return +} + +// Delete takes name of the petSet and deletes it. Returns an error if one occurs. +func (c *petSets) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("petsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *petSets) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("petsets"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the petSet, and returns the corresponding petSet object, and an error if there is any. +func (c *petSets) Get(name string) (result *v1alpha1.PetSet, err error) { + result = &v1alpha1.PetSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("petsets"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PetSets that match those selectors. +func (c *petSets) List(opts api.ListOptions) (result *v1alpha1.PetSetList, err error) { + result = &v1alpha1.PetSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("petsets"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested petSets. +func (c *petSets) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("petsets"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched petSet. +func (c *petSets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.PetSet, err error) { + result = &v1alpha1.PetSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("petsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/authentication_client.go new file mode 100644 index 000000000000..4e8a0a25f49f --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -0,0 +1,96 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/client-go/1.4/pkg/api" + registered "k8s.io/client-go/1.4/pkg/apimachinery/registered" + serializer "k8s.io/client-go/1.4/pkg/runtime/serializer" + rest "k8s.io/client-go/1.4/rest" +) + +type AuthenticationInterface interface { + GetRESTClient() *rest.RESTClient + TokenReviewsGetter +} + +// AuthenticationClient is used to interact with features provided by the Authentication group. +type AuthenticationClient struct { + *rest.RESTClient +} + +func (c *AuthenticationClient) TokenReviews() TokenReviewInterface { + return newTokenReviews(c) +} + +// NewForConfig creates a new AuthenticationClient for the given config. +func NewForConfig(c *rest.Config) (*AuthenticationClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthenticationClient{client}, nil +} + +// NewForConfigOrDie creates a new AuthenticationClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthenticationClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthenticationClient for the given RESTClient. +func New(c *rest.RESTClient) *AuthenticationClient { + return &AuthenticationClient{c} +} + +func setConfigDefaults(config *rest.Config) error { + // if authentication group is not registered, return an error + g, err := registered.Group("authentication.k8s.io") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthenticationClient) GetRESTClient() *rest.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/conversion.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/doc.go similarity index 58% rename from vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/conversion.go rename to vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/doc.go index 56a91dde4623..c9fe20ef1ba4 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/conversion.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/doc.go @@ -14,23 +14,7 @@ See the License for the specific language governing permissions and limitations under the License.
*/ -package v1beta1 - -import ( - "fmt" +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] - "k8s.io/client-go/1.4/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.String(), "Cluster", - func(label, value string) (string, string, error) { - switch label { - case "metadata.name": - return label, value, nil - default: - return "", "", fmt.Errorf("field label not supported: %s", label) - } - }, - ) -} +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/fake/doc.go new file mode 100644 index 000000000000..1e66bad3e91e --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go new file mode 100644 index 000000000000..bd770646b262 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1beta1 "k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1" + rest "k8s.io/client-go/1.4/rest" + testing "k8s.io/client-go/1.4/testing" +) + +type FakeAuthentication struct { + *testing.Fake +} + +func (c *FakeAuthentication) TokenReviews() v1beta1.TokenReviewInterface { + return &FakeTokenReviews{c} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeAuthentication) GetRESTClient() *rest.RESTClient { + return nil +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go new file mode 100644 index 000000000000..6508f5a94aa3 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go @@ -0,0 +1,22 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +// FakeTokenReviews implements TokenReviewInterface +type FakeTokenReviews struct { + Fake *FakeAuthentication +} diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/defaults.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/generated_expansion.go similarity index 83% rename from vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/defaults.go rename to vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/generated_expansion.go index c6edc015d373..f307100297e3 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/defaults.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/generated_expansion.go @@ -16,10 +16,4 @@ limitations under the License. package v1beta1 -import ( - "k8s.io/client-go/1.4/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return nil -} +type TokenReviewExpansion interface{} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/tokenreview.go new file mode 100644 index 000000000000..dc9313bae3d2 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authentication/v1beta1/tokenreview.go @@ -0,0 +1,40 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// TokenReviewsGetter has a method to return a TokenReviewInterface. +// A group's client should implement this interface. +type TokenReviewsGetter interface { + TokenReviews() TokenReviewInterface +} + +// TokenReviewInterface has methods to work with TokenReview resources. 
+type TokenReviewInterface interface { + TokenReviewExpansion +} + +// tokenReviews implements TokenReviewInterface +type tokenReviews struct { + client *AuthenticationClient +} + +// newTokenReviews returns a TokenReviews +func newTokenReviews(c *AuthenticationClient) *tokenReviews { + return &tokenReviews{ + client: c, + } +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/authorization/v1beta1/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authorization/v1beta1/doc.go index a3af9681d519..c9fe20ef1ba4 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authorization/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // This package has the automatically generated typed clients. package v1beta1 diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/authorization/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authorization/v1beta1/fake/doc.go index 1b806dc97d2e..1e66bad3e91e 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/authorization/v1beta1/fake/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/authorization/v1beta1/fake/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/autoscaling/v1/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/autoscaling/v1/doc.go index 446dcdd02fc8..50f4a7b4e3d6 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/autoscaling/v1/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/autoscaling/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // This package has the automatically generated typed clients. 
package v1 diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/autoscaling/v1/fake/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/autoscaling/v1/fake/doc.go index 1b806dc97d2e..1e66bad3e91e 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/autoscaling/v1/fake/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/autoscaling/v1/fake/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/batch/v1/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/batch/v1/doc.go index 446dcdd02fc8..50f4a7b4e3d6 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/batch/v1/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/batch/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // This package has the automatically generated typed clients. package v1 diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/batch/v1/fake/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/batch/v1/fake/doc.go index 1b806dc97d2e..1e66bad3e91e 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/batch/v1/fake/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/batch/v1/fake/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // Package fake has the automatically generated clients. package fake diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/certificates_client.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/certificates_client.go new file mode 100644 index 000000000000..279c4f765d26 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/certificates_client.go @@ -0,0 +1,96 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + api "k8s.io/client-go/1.4/pkg/api" + registered "k8s.io/client-go/1.4/pkg/apimachinery/registered" + serializer "k8s.io/client-go/1.4/pkg/runtime/serializer" + rest "k8s.io/client-go/1.4/rest" +) + +type CertificatesInterface interface { + GetRESTClient() *rest.RESTClient + CertificateSigningRequestsGetter +} + +// CertificatesClient is used to interact with features provided by the Certificates group. +type CertificatesClient struct { + *rest.RESTClient +} + +func (c *CertificatesClient) CertificateSigningRequests() CertificateSigningRequestInterface { + return newCertificateSigningRequests(c) +} + +// NewForConfig creates a new CertificatesClient for the given config. +func NewForConfig(c *rest.Config) (*CertificatesClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CertificatesClient{client}, nil +} + +// NewForConfigOrDie creates a new CertificatesClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CertificatesClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CertificatesClient for the given RESTClient. +func New(c *rest.RESTClient) *CertificatesClient { + return &CertificatesClient{c} +} + +func setConfigDefaults(config *rest.Config) error { + // if certificates group is not registered, return an error + g, err := registered.Group("certificates.k8s.io") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = &copyGroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CertificatesClient) GetRESTClient() *rest.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/certificatesigningrequest.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/certificatesigningrequest.go new file mode 100644 index 000000000000..4aa8af11030d --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/certificatesigningrequest.go @@ -0,0 +1,154 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + api "k8s.io/client-go/1.4/pkg/api" + v1alpha1 "k8s.io/client-go/1.4/pkg/apis/certificates/v1alpha1" + watch "k8s.io/client-go/1.4/pkg/watch" +) + +// CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. +// A group's client should implement this interface. +type CertificateSigningRequestsGetter interface { + CertificateSigningRequests() CertificateSigningRequestInterface +} + +// CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. +type CertificateSigningRequestInterface interface { + Create(*v1alpha1.CertificateSigningRequest) (*v1alpha1.CertificateSigningRequest, error) + Update(*v1alpha1.CertificateSigningRequest) (*v1alpha1.CertificateSigningRequest, error) + UpdateStatus(*v1alpha1.CertificateSigningRequest) (*v1alpha1.CertificateSigningRequest, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1alpha1.CertificateSigningRequest, error) + List(opts api.ListOptions) (*v1alpha1.CertificateSigningRequestList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.CertificateSigningRequest, err error) + CertificateSigningRequestExpansion +} + +// certificateSigningRequests implements CertificateSigningRequestInterface +type certificateSigningRequests struct { + client *CertificatesClient +} + +// newCertificateSigningRequests returns a CertificateSigningRequests +func newCertificateSigningRequests(c *CertificatesClient) *certificateSigningRequests { + return &certificateSigningRequests{ + client: c, + } +} + +// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. +func (c *certificateSigningRequests) Create(certificateSigningRequest *v1alpha1.CertificateSigningRequest) (result *v1alpha1.CertificateSigningRequest, err error) { + result = &v1alpha1.CertificateSigningRequest{} + err = c.client.Post(). + Resource("certificatesigningrequests"). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. +func (c *certificateSigningRequests) Update(certificateSigningRequest *v1alpha1.CertificateSigningRequest) (result *v1alpha1.CertificateSigningRequest, err error) { + result = &v1alpha1.CertificateSigningRequest{} + err = c.client.Put(). + Resource("certificatesigningrequests"). + Name(certificateSigningRequest.Name). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +func (c *certificateSigningRequests) UpdateStatus(certificateSigningRequest *v1alpha1.CertificateSigningRequest) (result *v1alpha1.CertificateSigningRequest, err error) { + result = &v1alpha1.CertificateSigningRequest{} + err = c.client.Put(). 
+ Resource("certificatesigningrequests"). + Name(certificateSigningRequest.Name). + SubResource("status"). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. +func (c *certificateSigningRequests) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("certificatesigningrequests"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *certificateSigningRequests) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("certificatesigningrequests"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. +func (c *certificateSigningRequests) Get(name string) (result *v1alpha1.CertificateSigningRequest, err error) { + result = &v1alpha1.CertificateSigningRequest{} + err = c.client.Get(). + Resource("certificatesigningrequests"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. +func (c *certificateSigningRequests) List(opts api.ListOptions) (result *v1alpha1.CertificateSigningRequestList, err error) { + result = &v1alpha1.CertificateSigningRequestList{} + err = c.client.Get(). + Resource("certificatesigningrequests"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested certificateSigningRequests. +func (c *certificateSigningRequests) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("certificatesigningrequests"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched certificateSigningRequest. +func (c *certificateSigningRequests) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.CertificateSigningRequest, err error) { + result = &v1alpha1.CertificateSigningRequest{} + err = c.client.Patch(pt). + Resource("certificatesigningrequests"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/doc.go new file mode 100644 index 000000000000..e0b389504493 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/fake/doc.go new file mode 100644 index 000000000000..1e66bad3e91e --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go new file mode 100644 index 000000000000..a109efbc9363 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/fake/fake_certificates_client.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1alpha1 "k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1" + rest "k8s.io/client-go/1.4/rest" + testing "k8s.io/client-go/1.4/testing" +) + +type FakeCertificates struct { + *testing.Fake +} + +func (c *FakeCertificates) CertificateSigningRequests() v1alpha1.CertificateSigningRequestInterface { + return &FakeCertificateSigningRequests{c} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeCertificates) GetRESTClient() *rest.RESTClient { + return nil +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/fake/fake_certificatesigningrequest.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/fake/fake_certificatesigningrequest.go new file mode 100644 index 000000000000..f8e9e69f5695 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/fake/fake_certificatesigningrequest.go @@ -0,0 +1,118 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/client-go/1.4/pkg/api" + unversioned "k8s.io/client-go/1.4/pkg/api/unversioned" + v1alpha1 "k8s.io/client-go/1.4/pkg/apis/certificates/v1alpha1" + labels "k8s.io/client-go/1.4/pkg/labels" + watch "k8s.io/client-go/1.4/pkg/watch" + testing "k8s.io/client-go/1.4/testing" +) + +// FakeCertificateSigningRequests implements CertificateSigningRequestInterface +type FakeCertificateSigningRequests struct { + Fake *FakeCertificates +} + +var certificatesigningrequestsResource = unversioned.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1alpha1", Resource: "certificatesigningrequests"} + +func (c *FakeCertificateSigningRequests) Create(certificateSigningRequest *v1alpha1.CertificateSigningRequest) (result *v1alpha1.CertificateSigningRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1alpha1.CertificateSigningRequest{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateSigningRequest), err +} + +func (c *FakeCertificateSigningRequests) Update(certificateSigningRequest *v1alpha1.CertificateSigningRequest) (result *v1alpha1.CertificateSigningRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(certificatesigningrequestsResource, certificateSigningRequest), &v1alpha1.CertificateSigningRequest{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateSigningRequest), err +} + +func (c *FakeCertificateSigningRequests) UpdateStatus(certificateSigningRequest *v1alpha1.CertificateSigningRequest) (*v1alpha1.CertificateSigningRequest, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(certificatesigningrequestsResource, "status", certificateSigningRequest), &v1alpha1.CertificateSigningRequest{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateSigningRequest), err +} + +func (c *FakeCertificateSigningRequests) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewRootDeleteAction(certificatesigningrequestsResource, name), &v1alpha1.CertificateSigningRequest{}) + return err +} + +func (c *FakeCertificateSigningRequests) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(certificatesigningrequestsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.CertificateSigningRequestList{}) + return err +} + +func (c *FakeCertificateSigningRequests) Get(name string) (result *v1alpha1.CertificateSigningRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(certificatesigningrequestsResource, name), &v1alpha1.CertificateSigningRequest{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateSigningRequest), err +} + +func (c *FakeCertificateSigningRequests) List(opts api.ListOptions) (result *v1alpha1.CertificateSigningRequestList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(certificatesigningrequestsResource, opts), &v1alpha1.CertificateSigningRequestList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.CertificateSigningRequestList{} + for _, item := range obj.(*v1alpha1.CertificateSigningRequestList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested certificateSigningRequests. +func (c *FakeCertificateSigningRequests) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(certificatesigningrequestsResource, opts)) +} + +// Patch applies the patch and returns the patched certificateSigningRequest. +func (c *FakeCertificateSigningRequests) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.CertificateSigningRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(certificatesigningrequestsResource, name, data, subresources...), &v1alpha1.CertificateSigningRequest{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateSigningRequest), err +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/generated_expansion.go new file mode 100644 index 000000000000..1f67dc9d8009 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/certificates/v1alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +type CertificateSigningRequestExpansion interface{} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/doc.go index 446dcdd02fc8..50f4a7b4e3d6 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // This package has the automatically generated typed clients. package v1 diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/fake/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/fake/doc.go index 1b806dc97d2e..1e66bad3e91e 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/fake/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/fake/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // Package fake has the automatically generated clients. 
package fake diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/fake/fake_pod_expansion.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/fake/fake_pod_expansion.go index ced693a2188e..91e539f4c638 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/fake/fake_pod_expansion.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/fake/fake_pod_expansion.go @@ -18,7 +18,6 @@ package fake import ( "k8s.io/client-go/1.4/pkg/api/v1" - policy "k8s.io/client-go/1.4/pkg/apis/policy/v1alpha1" "k8s.io/client-go/1.4/rest" "k8s.io/client-go/1.4/testing" ) @@ -45,14 +44,3 @@ func (c *FakePods) GetLogs(name string, opts *v1.PodLogOptions) *rest.Request { _, _ = c.Fake.Invokes(action, &v1.Pod{}) return &rest.Request{} } - -func (c *FakePods) Evict(eviction *policy.Eviction) error { - action := testing.CreateActionImpl{} - action.Verb = "create" - action.Resource = podsResource - action.Subresource = "evictions" - action.Object = eviction - - _, err := c.Fake.Invokes(action, eviction) - return err -} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/pod_expansion.go index 064a9805a676..8ece7798e3e6 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/pod_expansion.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/core/v1/pod_expansion.go @@ -19,14 +19,12 @@ package v1 import ( "k8s.io/client-go/1.4/pkg/api" "k8s.io/client-go/1.4/pkg/api/v1" - policy "k8s.io/client-go/1.4/pkg/apis/policy/v1alpha1" "k8s.io/client-go/1.4/rest" ) // The PodExpansion interface allows manually adding extra methods to the PodInterface. type PodExpansion interface { Bind(binding *v1.Binding) error - Evict(eviction *policy.Eviction) error GetLogs(name string, opts *v1.PodLogOptions) *rest.Request } @@ -35,10 +33,6 @@ func (c *pods) Bind(binding *v1.Binding) error { return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() } -func (c *pods) Evict(eviction *policy.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do().Error() -} - // Get constructs a request for getting the logs for a pod func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *rest.Request { return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, api.ParameterCodec) diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/doc.go index a3af9681d519..c9fe20ef1ba4 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // This package has the automatically generated typed clients. 
package v1beta1 diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/extensions_client.go index 1dfb3ed15129..51e19889fbc8 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -32,7 +32,6 @@ type ExtensionsInterface interface { PodSecurityPoliciesGetter ReplicaSetsGetter ScalesGetter - StorageClassesGetter ThirdPartyResourcesGetter } @@ -69,10 +68,6 @@ func (c *ExtensionsClient) Scales(namespace string) ScaleInterface { return newScales(c, namespace) } -func (c *ExtensionsClient) StorageClasses() StorageClassInterface { - return newStorageClasses(c) -} - func (c *ExtensionsClient) ThirdPartyResources() ThirdPartyResourceInterface { return newThirdPartyResources(c) } diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/fake/doc.go index 1b806dc97d2e..1e66bad3e91e 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/fake/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/fake/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // Package fake has the automatically generated clients. 
package fake diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go index 8d2c7047ec4a..06b7b29da5df 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go @@ -54,10 +54,6 @@ func (c *FakeExtensions) Scales(namespace string) v1beta1.ScaleInterface { return &FakeScales{c, namespace} } -func (c *FakeExtensions) StorageClasses() v1beta1.StorageClassInterface { - return &FakeStorageClasses{c} -} - func (c *FakeExtensions) ThirdPartyResources() v1beta1.ThirdPartyResourceInterface { return &FakeThirdPartyResources{c} } diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/generated_expansion.go index ece8b6c7aec2..60cf61d4b26f 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -26,6 +26,4 @@ type PodSecurityPolicyExpansion interface{} type ReplicaSetExpansion interface{} -type StorageClassExpansion interface{} - type ThirdPartyResourceExpansion interface{} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/policy/v1alpha1/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/policy/v1alpha1/doc.go index b38e8275beda..e0b389504493 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/policy/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/policy/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // This package has the automatically generated typed clients. package v1alpha1 diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/policy/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/policy/v1alpha1/fake/doc.go index 1b806dc97d2e..1e66bad3e91e 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/policy/v1alpha1/fake/doc.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/policy/v1alpha1/fake/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,authorization/v1beta1,autoscaling/v1,batch/v1,extensions/v1beta1,policy/v1alpha1] +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] // Package fake has the automatically generated clients. 
package fake diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/clusterrole.go new file mode 100644 index 000000000000..6865ab3bd5f2 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -0,0 +1,141 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + api "k8s.io/client-go/1.4/pkg/api" + v1alpha1 "k8s.io/client-go/1.4/pkg/apis/rbac/v1alpha1" + watch "k8s.io/client-go/1.4/pkg/watch" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. +type ClusterRoleInterface interface { + Create(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) + Update(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1alpha1.ClusterRole, error) + List(opts api.ListOptions) (*v1alpha1.ClusterRoleList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + client *RbacClient +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *RbacClient) *clusterRoles { + return &clusterRoles{ + client: c, + } +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Post(). + Resource("clusterroles"). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Put(). + Resource("clusterroles"). + Name(clusterRole.Name). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). 
+ Resource("clusterroles"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. +func (c *clusterRoles) List(opts api.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { + result = &v1alpha1.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("clusterroles"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRole. +func (c *clusterRoles) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Patch(pt). + Resource("clusterroles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go new file mode 100644 index 000000000000..121ff48adf6f --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -0,0 +1,141 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + api "k8s.io/client-go/1.4/pkg/api" + v1alpha1 "k8s.io/client-go/1.4/pkg/apis/rbac/v1alpha1" + watch "k8s.io/client-go/1.4/pkg/watch" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
+type ClusterRoleBindingInterface interface { + Create(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) + Update(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1alpha1.ClusterRoleBinding, error) + List(opts api.ListOptions) (*v1alpha1.ClusterRoleBindingList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + client *RbacClient +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *RbacClient) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c, + } +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Post(). + Resource("clusterrolebindings"). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Put(). + Resource("clusterrolebindings"). + Name(clusterRoleBinding.Name). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts api.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { + result = &v1alpha1.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. 
+func (c *clusterRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Resource("clusterrolebindings"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *clusterRoleBindings) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Patch(pt). + Resource("clusterrolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/doc.go new file mode 100644 index 000000000000..e0b389504493 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/doc.go new file mode 100644 index 000000000000..1e66bad3e91e --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] + +// Package fake has the automatically generated clients. 
+package fake diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go new file mode 100644 index 000000000000..7c257c107157 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go @@ -0,0 +1,109 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/client-go/1.4/pkg/api" + unversioned "k8s.io/client-go/1.4/pkg/api/unversioned" + v1alpha1 "k8s.io/client-go/1.4/pkg/apis/rbac/v1alpha1" + labels "k8s.io/client-go/1.4/pkg/labels" + watch "k8s.io/client-go/1.4/pkg/watch" + testing "k8s.io/client-go/1.4/testing" +) + +// FakeClusterRoles implements ClusterRoleInterface +type FakeClusterRoles struct { + Fake *FakeRbac +} + +var clusterrolesResource = unversioned.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "clusterroles"} + +func (c *FakeClusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRole), err +} + +func (c *FakeClusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clusterrolesResource, clusterRole), &v1alpha1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRole), err +} + +func (c *FakeClusterRoles) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterrolesResource, name), &v1alpha1.ClusterRole{}) + return err +} + +func (c *FakeClusterRoles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterrolesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleList{}) + return err +} + +func (c *FakeClusterRoles) Get(name string) (result *v1alpha1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clusterrolesResource, name), &v1alpha1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRole), err +} + +func (c *FakeClusterRoles) List(opts api.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(clusterrolesResource, opts), &v1alpha1.ClusterRoleList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ClusterRoleList{} + for _, item := range obj.(*v1alpha1.ClusterRoleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *FakeClusterRoles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusterrolesResource, opts)) +} + +// Patch applies the patch and returns the patched clusterRole. +func (c *FakeClusterRoles) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusterrolesResource, name, data, subresources...), &v1alpha1.ClusterRole{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRole), err +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go new file mode 100644 index 000000000000..0a82f598f033 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go @@ -0,0 +1,109 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/client-go/1.4/pkg/api" + unversioned "k8s.io/client-go/1.4/pkg/api/unversioned" + v1alpha1 "k8s.io/client-go/1.4/pkg/apis/rbac/v1alpha1" + labels "k8s.io/client-go/1.4/pkg/labels" + watch "k8s.io/client-go/1.4/pkg/watch" + testing "k8s.io/client-go/1.4/testing" +) + +// FakeClusterRoleBindings implements ClusterRoleBindingInterface +type FakeClusterRoleBindings struct { + Fake *FakeRbac +} + +var clusterrolebindingsResource = unversioned.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "clusterrolebindings"} + +func (c *FakeClusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateAction(clusterrolebindingsResource, clusterRoleBinding), &v1alpha1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{}) + return err +} + +func (c *FakeClusterRoleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clusterrolebindingsResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ClusterRoleBindingList{}) + return err +} + +func (c *FakeClusterRoleBindings) Get(name string) (result *v1alpha1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clusterrolebindingsResource, name), &v1alpha1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRoleBinding), err +} + +func (c *FakeClusterRoleBindings) List(opts api.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clusterrolebindingsResource, opts), &v1alpha1.ClusterRoleBindingList{}) + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ClusterRoleBindingList{} + for _, item := range obj.(*v1alpha1.ClusterRoleBindingList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *FakeClusterRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clusterrolebindingsResource, opts)) +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *FakeClusterRoleBindings) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clusterrolebindingsResource, name, data, subresources...), &v1alpha1.ClusterRoleBinding{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterRoleBinding), err +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go new file mode 100644 index 000000000000..70a1ee74f30f --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package fake + +import ( + v1alpha1 "k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1" + rest "k8s.io/client-go/1.4/rest" + testing "k8s.io/client-go/1.4/testing" +) + +type FakeRbac struct { + *testing.Fake +} + +func (c *FakeRbac) ClusterRoles() v1alpha1.ClusterRoleInterface { + return &FakeClusterRoles{c} +} + +func (c *FakeRbac) ClusterRoleBindings() v1alpha1.ClusterRoleBindingInterface { + return &FakeClusterRoleBindings{c} +} + +func (c *FakeRbac) Roles(namespace string) v1alpha1.RoleInterface { + return &FakeRoles{c, namespace} +} + +func (c *FakeRbac) RoleBindings(namespace string) v1alpha1.RoleBindingInterface { + return &FakeRoleBindings{c, namespace} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeRbac) GetRESTClient() *rest.RESTClient { + return nil +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go new file mode 100644 index 000000000000..71260ce8f7d0 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go @@ -0,0 +1,117 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/client-go/1.4/pkg/api" + unversioned "k8s.io/client-go/1.4/pkg/api/unversioned" + v1alpha1 "k8s.io/client-go/1.4/pkg/apis/rbac/v1alpha1" + labels "k8s.io/client-go/1.4/pkg/labels" + watch "k8s.io/client-go/1.4/pkg/watch" + testing "k8s.io/client-go/1.4/testing" +) + +// FakeRoles implements RoleInterface +type FakeRoles struct { + Fake *FakeRbac + ns string +} + +var rolesResource = unversioned.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "roles"} + +func (c *FakeRoles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Role), err +} + +func (c *FakeRoles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(rolesResource, c.ns, role), &v1alpha1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Role), err +} + +func (c *FakeRoles) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(rolesResource, c.ns, name), &v1alpha1.Role{}) + + return err +} + +func (c *FakeRoles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := testing.NewDeleteCollectionAction(rolesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.RoleList{}) + return err +} + +func (c *FakeRoles) Get(name string) (result *v1alpha1.Role, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(rolesResource, c.ns, name), &v1alpha1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Role), err +} + +func (c *FakeRoles) List(opts api.ListOptions) (result *v1alpha1.RoleList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(rolesResource, c.ns, opts), &v1alpha1.RoleList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.RoleList{} + for _, item := range obj.(*v1alpha1.RoleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *FakeRoles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(rolesResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched role. +func (c *FakeRoles) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(rolesResource, c.ns, name, data, subresources...), &v1alpha1.Role{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Role), err +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go new file mode 100644 index 000000000000..cbd1188756b5 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go @@ -0,0 +1,117 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + api "k8s.io/client-go/1.4/pkg/api" + unversioned "k8s.io/client-go/1.4/pkg/api/unversioned" + v1alpha1 "k8s.io/client-go/1.4/pkg/apis/rbac/v1alpha1" + labels "k8s.io/client-go/1.4/pkg/labels" + watch "k8s.io/client-go/1.4/pkg/watch" + testing "k8s.io/client-go/1.4/testing" +) + +// FakeRoleBindings implements RoleBindingInterface +type FakeRoleBindings struct { + Fake *FakeRbac + ns string +} + +var rolebindingsResource = unversioned.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1alpha1", Resource: "rolebindings"} + +func (c *FakeRoleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RoleBinding), err +} + +func (c *FakeRoleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(rolebindingsResource, c.ns, roleBinding), &v1alpha1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RoleBinding), err +} + +func (c *FakeRoleBindings) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{}) + + return err +} + +func (c *FakeRoleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + action := testing.NewDeleteCollectionAction(rolebindingsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.RoleBindingList{}) + return err +} + +func (c *FakeRoleBindings) Get(name string) (result *v1alpha1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(rolebindingsResource, c.ns, name), &v1alpha1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RoleBinding), err +} + +func (c *FakeRoleBindings) List(opts api.ListOptions) (result *v1alpha1.RoleBindingList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(rolebindingsResource, c.ns, opts), &v1alpha1.RoleBindingList{}) + + if obj == nil { + return nil, err + } + + label := opts.LabelSelector + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.RoleBindingList{} + for _, item := range obj.(*v1alpha1.RoleBindingList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *FakeRoleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(rolebindingsResource, c.ns, opts)) + +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *FakeRoleBindings) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(rolebindingsResource, c.ns, name, data, subresources...), &v1alpha1.RoleBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.RoleBinding), err +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/generated_expansion.go new file mode 100644 index 000000000000..86def26631fd --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/generated_expansion.go @@ -0,0 +1,25 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +type ClusterRoleExpansion interface{} + +type ClusterRoleBindingExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/rbac_client.go new file mode 100644 index 000000000000..8dfcbabfbca2 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -0,0 +1,111 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + api "k8s.io/client-go/1.4/pkg/api" + registered "k8s.io/client-go/1.4/pkg/apimachinery/registered" + serializer "k8s.io/client-go/1.4/pkg/runtime/serializer" + rest "k8s.io/client-go/1.4/rest" +) + +type RbacInterface interface { + GetRESTClient() *rest.RESTClient + ClusterRolesGetter + ClusterRoleBindingsGetter + RolesGetter + RoleBindingsGetter +} + +// RbacClient is used to interact with features provided by the Rbac group. +type RbacClient struct { + *rest.RESTClient +} + +func (c *RbacClient) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *RbacClient) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacClient) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacClient) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +// NewForConfig creates a new RbacClient for the given config. +func NewForConfig(c *rest.Config) (*RbacClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacClient{client}, nil +} + +// NewForConfigOrDie creates a new RbacClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RbacClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RbacClient for the given RESTClient. +func New(c *rest.RESTClient) *RbacClient { + return &RbacClient{c} +} + +func setConfigDefaults(config *rest.Config) error { + // if rbac group is not registered, return an error + g, err := registered.Group("rbac.authorization.k8s.io") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *RbacClient) GetRESTClient() *rest.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/role.go new file mode 100644 index 000000000000..c86bb5e5e389 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/role.go @@ -0,0 +1,151 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + api "k8s.io/client-go/1.4/pkg/api" + v1alpha1 "k8s.io/client-go/1.4/pkg/apis/rbac/v1alpha1" + watch "k8s.io/client-go/1.4/pkg/watch" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. +type RoleInterface interface { + Create(*v1alpha1.Role) (*v1alpha1.Role, error) + Update(*v1alpha1.Role) (*v1alpha1.Role, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1alpha1.Role, error) + List(opts api.ListOptions) (*v1alpha1.RoleList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + client *RbacClient + ns string +} + +// newRoles returns a Roles +func newRoles(c *RbacClient, namespace string) *roles { + return &roles{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Post(). + Namespace(c.ns). + Resource("roles"). + Body(role). + Do(). + Into(result) + return +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Put(). + Namespace(c.ns). + Resource("roles"). + Name(role.Name). + Body(role). + Do(). + Into(result) + return +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). 
+ Error() +} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts api.ListOptions) (result *v1alpha1.RoleList, err error) { + result = &v1alpha1.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *roles) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, api.ParameterCodec). + Watch() +} + +// Patch applies the patch and returns the patched role. +func (c *roles) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("roles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/rolebinding.go new file mode 100644 index 000000000000..3fd3964122d1 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -0,0 +1,151 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + api "k8s.io/client-go/1.4/pkg/api" + v1alpha1 "k8s.io/client-go/1.4/pkg/apis/rbac/v1alpha1" + watch "k8s.io/client-go/1.4/pkg/watch" +) + +// RoleBindingsGetter has a method to return a RoleBindingInterface. +// A group's client should implement this interface. +type RoleBindingsGetter interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. 
+type RoleBindingInterface interface { + Create(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error) + Update(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error) + Delete(name string, options *api.DeleteOptions) error + DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error + Get(name string) (*v1alpha1.RoleBinding, error) + List(opts api.ListOptions) (*v1alpha1.RoleBindingList, error) + Watch(opts api.ListOptions) (watch.Interface, error) + Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + client *RbacClient + ns string +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *RbacClient, namespace string) *roleBindings { + return &roleBindings{ + client: c, + ns: namespace, + } +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("rolebindings"). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("rolebindings"). + Name(roleBinding.Name). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *api.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBindings) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&listOptions, api.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts api.ListOptions) (result *v1alpha1.RoleBindingList, err error) { + result = &v1alpha1.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, api.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts api.ListOptions) (watch.Interface, error) { + return c.client.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, api.ParameterCodec). 
+ Watch() +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *roleBindings) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("rolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/doc.go new file mode 100644 index 000000000000..c9fe20ef1ba4 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/fake/doc.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/fake/doc.go new file mode 100644 index 000000000000..1e66bad3e91e --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package is generated by client-gen with arguments: --clientset-name=release_1_4 --input=[api/v1,apps/v1alpha1,authentication/v1beta1,authorization/v1beta1,autoscaling/v1,batch/v1,certificates/v1alpha1,extensions/v1beta1,policy/v1alpha1,rbac/v1alpha1,storage/v1beta1] + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go new file mode 100644 index 000000000000..59235fd8c998 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go @@ -0,0 +1,37 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + v1beta1 "k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1" + rest "k8s.io/client-go/1.4/rest" + testing "k8s.io/client-go/1.4/testing" +) + +type FakeStorage struct { + *testing.Fake +} + +func (c *FakeStorage) StorageClasses() v1beta1.StorageClassInterface { + return &FakeStorageClasses{c} +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeStorage) GetRESTClient() *rest.RESTClient { + return nil +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/fake/fake_storageclass.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go similarity index 96% rename from vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/fake/fake_storageclass.go rename to vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go index 91f09001df80..a622b7f07645 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/fake/fake_storageclass.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go @@ -19,7 +19,7 @@ package fake import ( api "k8s.io/client-go/1.4/pkg/api" unversioned "k8s.io/client-go/1.4/pkg/api/unversioned" - v1beta1 "k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1" + v1beta1 "k8s.io/client-go/1.4/pkg/apis/storage/v1beta1" labels "k8s.io/client-go/1.4/pkg/labels" watch "k8s.io/client-go/1.4/pkg/watch" testing "k8s.io/client-go/1.4/testing" @@ -27,10 +27,10 @@ import ( // FakeStorageClasses implements StorageClassInterface type FakeStorageClasses struct { - Fake *FakeExtensions + Fake *FakeStorage } -var storageclassesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "storageclasses"} +var storageclassesResource = unversioned.GroupVersionResource{Group: "storage.k8s.io", Version: "v1beta1", Resource: "storageclasses"} func (c *FakeStorageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { obj, err := c.Fake. diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/generated_expansion.go new file mode 100644 index 000000000000..b18dda009f26 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +type StorageClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/storage_client.go new file mode 100644 index 000000000000..7fe04b38c408 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/storage_client.go @@ -0,0 +1,96 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + api "k8s.io/client-go/1.4/pkg/api" + registered "k8s.io/client-go/1.4/pkg/apimachinery/registered" + serializer "k8s.io/client-go/1.4/pkg/runtime/serializer" + rest "k8s.io/client-go/1.4/rest" +) + +type StorageInterface interface { + GetRESTClient() *rest.RESTClient + StorageClassesGetter +} + +// StorageClient is used to interact with features provided by the Storage group. +type StorageClient struct { + *rest.RESTClient +} + +func (c *StorageClient) StorageClasses() StorageClassInterface { + return newStorageClasses(c) +} + +// NewForConfig creates a new StorageClient for the given config. +func NewForConfig(c *rest.Config) (*StorageClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageClient{client}, nil +} + +// NewForConfigOrDie creates a new StorageClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageClient for the given RESTClient. +func New(c *rest.RESTClient) *StorageClient { + return &StorageClient{c} +} + +func setConfigDefaults(config *rest.Config) error { + // if storage group is not registered, return an error + g, err := registered.Group("storage.k8s.io") + if err != nil { + return err + } + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + // TODO: Unconditionally set the config.Version, until we fix the config. + //if config.Version == "" { + copyGroupVersion := g.GroupVersion + config.GroupVersion = ©GroupVersion + //} + + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs} + + return nil +} + +// GetRESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *StorageClient) GetRESTClient() *rest.RESTClient { + if c == nil { + return nil + } + return c.RESTClient +} diff --git a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/storageclass.go b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/storageclass.go similarity index 96% rename from vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/storageclass.go rename to vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/storageclass.go index fc1f651d125e..9b434af7589e 100644 --- a/vendor/k8s.io/client-go/1.4/kubernetes/typed/extensions/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/1.4/kubernetes/typed/storage/v1beta1/storageclass.go @@ -18,7 +18,7 @@ package v1beta1 import ( api "k8s.io/client-go/1.4/pkg/api" - v1beta1 "k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1" + v1beta1 "k8s.io/client-go/1.4/pkg/apis/storage/v1beta1" watch "k8s.io/client-go/1.4/pkg/watch" ) @@ -43,11 +43,11 @@ type StorageClassInterface interface { // storageClasses implements StorageClassInterface type storageClasses struct { - client *ExtensionsClient + client *StorageClient } // newStorageClasses returns a StorageClasses -func newStorageClasses(c *ExtensionsClient) *storageClasses { +func newStorageClasses(c *StorageClient) *storageClasses { return &storageClasses{ client: c, } diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/context.go b/vendor/k8s.io/client-go/1.4/pkg/api/context.go index df6997558755..3356ce78cc0a 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/api/context.go +++ b/vendor/k8s.io/client-go/1.4/pkg/api/context.go @@ -59,6 +59,9 @@ const ( // uidKey is the context key for the uid to assign to an object on create. uidKey + + // userAgentKey is the context key for the request user agent. + userAgentKey ) // NewContext instantiates a base context object for request flows. @@ -136,3 +139,14 @@ func UIDFrom(ctx Context) (types.UID, bool) { uid, ok := ctx.Value(uidKey).(types.UID) return uid, ok } + +// WithUserAgent returns a copy of parent in which the user value is set +func WithUserAgent(parent Context, userAgent string) Context { + return WithValue(parent, userAgentKey, userAgent) +} + +// UserAgentFrom returns the value of the userAgent key on the ctx +func UserAgentFrom(ctx Context) (string, bool) { + userAgent, ok := ctx.Value(userAgentKey).(string) + return userAgent, ok +} diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/helpers.go b/vendor/k8s.io/client-go/1.4/pkg/api/helpers.go index 96086b0cf57a..103e77456dab 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/api/helpers.go +++ b/vendor/k8s.io/client-go/1.4/pkg/api/helpers.go @@ -526,6 +526,20 @@ func TaintToleratedByTolerations(taint *Taint, tolerations []Toleration) bool { return tolerated } +// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, +// if the two taints have same key:effect, regard as they match. +func (t *Taint) MatchTaint(taintToMatch Taint) bool { + return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect +} + +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. 
+func (t *Taint) ToString() string { + if len(t.Value) == 0 { + return fmt.Sprintf("%v:%v", t.Key, t.Effect) + } + return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) +} + func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (AvoidPods, error) { var avoidPods AvoidPods if len(annotations) > 0 && annotations[PreferAvoidPodsAnnotationKey] != "" { diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/testapi/testapi.go b/vendor/k8s.io/client-go/1.4/pkg/api/testapi/testapi.go index 23bc7fc6e66f..39b62828c926 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/api/testapi/testapi.go +++ b/vendor/k8s.io/client-go/1.4/pkg/api/testapi/testapi.go @@ -36,6 +36,7 @@ import ( "k8s.io/client-go/1.4/pkg/apis/imagepolicy" "k8s.io/client-go/1.4/pkg/apis/policy" "k8s.io/client-go/1.4/pkg/apis/rbac" + "k8s.io/client-go/1.4/pkg/apis/storage" "k8s.io/client-go/1.4/pkg/federation/apis/federation" "k8s.io/client-go/1.4/pkg/runtime" "k8s.io/client-go/1.4/pkg/runtime/serializer/recognizer" @@ -52,6 +53,7 @@ import ( _ "k8s.io/client-go/1.4/pkg/apis/imagepolicy/install" _ "k8s.io/client-go/1.4/pkg/apis/policy/install" _ "k8s.io/client-go/1.4/pkg/apis/rbac/install" + _ "k8s.io/client-go/1.4/pkg/apis/storage/install" _ "k8s.io/client-go/1.4/pkg/federation/apis/federation/install" ) @@ -66,6 +68,7 @@ var ( Federation TestGroup Rbac TestGroup Certificates TestGroup + Storage TestGroup ImagePolicy TestGroup serializer runtime.SerializerInfo @@ -218,6 +221,15 @@ func init() { externalTypes: api.Scheme.KnownTypes(externalGroupVersion), } } + if _, ok := Groups[storage.GroupName]; !ok { + externalGroupVersion := unversioned.GroupVersion{Group: storage.GroupName, Version: registered.GroupOrDie(storage.GroupName).GroupVersion.Version} + Groups[storage.GroupName] = TestGroup{ + externalGroupVersion: externalGroupVersion, + internalGroupVersion: storage.SchemeGroupVersion, + internalTypes: api.Scheme.KnownTypes(storage.SchemeGroupVersion), + externalTypes: api.Scheme.KnownTypes(externalGroupVersion), + } + } if _, ok := Groups[certificates.GroupName]; !ok { externalGroupVersion := unversioned.GroupVersion{Group: certificates.GroupName, Version: registered.GroupOrDie(certificates.GroupName).GroupVersion.Version} Groups[certificates.GroupName] = TestGroup{ @@ -247,6 +259,7 @@ func init() { Extensions = Groups[extensions.GroupName] Federation = Groups[federation.GroupName] Rbac = Groups[rbac.GroupName] + Storage = Groups[storage.GroupName] ImagePolicy = Groups[imagepolicy.GroupName] } diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/generated.proto index cab7058aac1b..bd72ad341b2d 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/generated.proto +++ b/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/generated.proto @@ -221,7 +221,7 @@ message ListMeta { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency optional string resourceVersion = 2; } @@ -245,12 +245,12 @@ message ServerAddressByClientCIDR { // Status is a return value for calls that don't return other objects. message Status { // Standard list metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional ListMeta metadata = 1; // Status of the operation. // One of: "Success" or "Failure". - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional string status = 2; // A human-readable description of the status of this operation. @@ -311,7 +311,7 @@ message StatusDetails { // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional string kind = 3; // The Causes array includes more details associated with the StatusReason @@ -366,13 +366,13 @@ message TypeMeta { // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional string kind = 1; // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#resources optional string apiVersion = 2; } diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/types.go b/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/types.go index 7d632b496b14..18882a21018f 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/types.go +++ b/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/types.go @@ -35,13 +35,13 @@ type TypeMeta struct { // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#resources APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` } @@ -58,7 +58,7 @@ type ListMeta struct { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` } @@ -75,12 +75,12 @@ type ExportOptions struct { type Status struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Status of the operation. // One of: "Success" or "Failure". - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` // A human-readable description of the status of this operation. Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` @@ -112,7 +112,7 @@ type StatusDetails struct { Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` // The Causes array includes more details associated with the StatusReason // failure. Not all StatusReasons may provide detailed causes. diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/types_swagger_doc_generated.go index 3f08115a6cf0..e0355f51b6c7 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/types_swagger_doc_generated.go +++ b/vendor/k8s.io/client-go/1.4/pkg/api/unversioned/types_swagger_doc_generated.go @@ -123,7 +123,7 @@ func (LabelSelectorRequirement) SwaggerDoc() map[string]string { var map_ListMeta = map[string]string{ "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", - "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. 
More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency", } func (ListMeta) SwaggerDoc() map[string]string { @@ -159,8 +159,8 @@ func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { var map_Status = map[string]string{ "": "Status is a return value for calls that don't return other objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", "message": "A human-readable description of the status of this operation.", "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", @@ -186,7 +186,7 @@ var map_StatusDetails = map[string]string{ "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", - "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried.", } @@ -197,8 +197,8 @@ func (StatusDetails) SwaggerDoc() map[string]string { var map_TypeMeta = map[string]string{ "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", - "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#resources", } func (TypeMeta) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/v1/conversion.go b/vendor/k8s.io/client-go/1.4/pkg/api/v1/conversion.go index 9d8674c8afc0..86b075a325d9 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/api/v1/conversion.go +++ b/vendor/k8s.io/client-go/1.4/pkg/api/v1/conversion.go @@ -414,14 +414,22 @@ func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, return err } out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value) } else { delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) } return nil } func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { // TODO: sometime after we move init container to stable, remove these conversions + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. + if valueBeta, okBeta := in.Annotations[PodInitContainerStatusesBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainerStatusesAnnotationKey] = valueBeta + } + // Move the annotation to the internal repr. 
field if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { var values []ContainerStatus if err := json.Unmarshal([]byte(value), &values); err != nil { @@ -446,6 +454,7 @@ func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out out.Annotations[k] = v } delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) } return nil } @@ -575,6 +584,7 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error delete(out.Annotations, PodInitContainersAnnotationKey) delete(out.Annotations, PodInitContainersBetaAnnotationKey) delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) } if len(out.Spec.InitContainers) > 0 { value, err := json.Marshal(out.Spec.InitContainers) @@ -590,6 +600,7 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error return err } out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value) + out.Annotations[PodInitContainerStatusesBetaAnnotationKey] = string(value) } // We need to reset certain fields for mirror pods from pre-v1.1 kubelet @@ -627,6 +638,11 @@ func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error // back to the caller. in.Spec.InitContainers = values } + // If there is a beta annotation, copy to alpha key. + // See commit log for PR #31026 for why we do this. + if valueBeta, okBeta := in.Annotations[PodInitContainerStatusesBetaAnnotationKey]; okBeta { + in.Annotations[PodInitContainerStatusesAnnotationKey] = valueBeta + } if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok { var values []ContainerStatus if err := json.Unmarshal([]byte(value), &values); err != nil { @@ -653,6 +669,7 @@ func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error delete(out.Annotations, PodInitContainersAnnotationKey) delete(out.Annotations, PodInitContainersBetaAnnotationKey) delete(out.Annotations, PodInitContainerStatusesAnnotationKey) + delete(out.Annotations, PodInitContainerStatusesBetaAnnotationKey) } return nil } diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/v1/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/api/v1/generated.proto index 9552a5d558db..06da5fcc2c98 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/api/v1/generated.proto +++ b/vendor/k8s.io/client-go/1.4/pkg/api/v1/generated.proto @@ -37,13 +37,13 @@ option go_package = "v1"; // ownership management and SELinux relabeling. message AWSElasticBlockStoreVolumeSource { // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore optional string volumeID = 1; // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore // TODO: how do we prevent errors in the filesystem from compromising the machine optional string fsType = 2; @@ -55,7 +55,7 @@ message AWSElasticBlockStoreVolumeSource { // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". // If omitted, the default is "false". - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore optional bool readOnly = 4; } @@ -127,7 +127,7 @@ message AzureFileVolumeSource { // For example, a pod is bound to a node by a scheduler. message Binding { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // The target object that you want to bind to the standard object. @@ -147,27 +147,27 @@ message Capabilities { // Cephfs volumes do not support ownership management or SELinux relabeling. message CephFSVolumeSource { // Required: Monitors is a collection of Ceph monitors - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it repeated string monitors = 1; // Optional: Used as the mounted root, rather than the full Ceph tree, default is / optional string path = 2; // Optional: User is the rados user name, default is admin - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it optional string user = 3; // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it optional string secretFile = 4; // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it optional LocalObjectReference secretRef = 5; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it optional bool readOnly = 6; } @@ -177,18 +177,18 @@ message CephFSVolumeSource { // Cinder volumes support ownership management and SELinux relabeling. message CinderVolumeSource { // volume id used to identify the volume in cinder - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md optional string volumeID = 1; // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md optional string fsType = 2; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md optional bool readOnly = 3; } @@ -214,7 +214,7 @@ message ComponentCondition { // ComponentStatus (and ComponentStatusList) holds the cluster validation info. message ComponentStatus { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // List of component conditions observed @@ -224,7 +224,7 @@ message ComponentStatus { // Status of all the conditions for the component as a list of ComponentStatus objects. message ComponentStatusList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // List of ComponentStatus objects. @@ -234,7 +234,7 @@ message ComponentStatusList { // ConfigMap holds configuration data for pods to consume. message ConfigMap { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Data contains the configuration data. @@ -253,7 +253,7 @@ message ConfigMapKeySelector { // ConfigMapList is a resource containing a list of ConfigMap objects. message ConfigMapList { - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is the list of ConfigMaps. @@ -294,7 +294,7 @@ message Container { optional string name = 1; // Docker image name. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md optional string image = 2; // Entrypoint array. Not executed within a shell. @@ -304,7 +304,7 @@ message Container { // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md#containers-and-commands repeated string command = 3; // Arguments to the entrypoint. @@ -314,7 +314,7 @@ message Container { // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md#containers-and-commands repeated string args = 4; // Container's working directory. 
@@ -338,7 +338,7 @@ message Container { // Compute Resources required by this container. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#resources optional ResourceRequirements resources = 8; // Pod volumes to mount into the container's filesystem. @@ -348,13 +348,13 @@ message Container { // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes optional Probe livenessProbe = 10; // Periodic probe of container service readiness. // Container will be removed from service endpoints if the probe fails. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes optional Probe readinessProbe = 11; // Actions that the management system should take in response to container lifecycle events. @@ -372,11 +372,11 @@ message Container { // One of Always, Never, IfNotPresent. // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#updating-images + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md#updating-images optional string imagePullPolicy = 14; // Security options the pod should run with. - // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md + // More info: http://releases.k8s.io/release-1.4/docs/design/security_context.md optional SecurityContext securityContext = 15; // Whether this container should allocate a buffer for stdin in the container runtime. If this @@ -508,7 +508,7 @@ message ContainerStatus { optional int32 restartCount = 5; // The image the container is running. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md // TODO(dchen1107): Which image the container is running with? optional string image = 6; @@ -516,7 +516,7 @@ message ContainerStatus { optional string imageID = 7; // Container's ID in the format 'docker://'. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#container-information + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#container-information optional string containerID = 8; } @@ -582,7 +582,7 @@ message EmptyDirVolumeSource { // What type of storage medium should back this directory. // The default is "" which means to use the node's default medium. // Must be an empty string (default) or Memory. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#emptydir optional string medium = 1; } @@ -660,7 +660,7 @@ message EndpointSubset { // ] message Endpoints { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // The set of all endpoints is the union of all subsets. 
Addresses are placed into @@ -676,7 +676,7 @@ message Endpoints { // EndpointsList is a list of endpoints. message EndpointsList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // List of endpoints. @@ -723,7 +723,7 @@ message EnvVarSource { // TODO: Decide whether to store these separately or with the object they apply to. message Event { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // The object that this event is about. @@ -757,7 +757,7 @@ message Event { // EventList is a list of events. message EventList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // List of events @@ -854,13 +854,13 @@ message FlockerVolumeSource { // PDs support ownership management and SELinux relabeling. message GCEPersistentDiskVolumeSource { // Unique name of the PD resource in GCE. Used to identify the disk in GCE. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk optional string pdName = 1; // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk // TODO: how do we prevent errors in the filesystem from compromising the machine optional string fsType = 2; @@ -868,12 +868,12 @@ message GCEPersistentDiskVolumeSource { // If omitted, the default is to mount by volume name. // Examples: For volume /dev/sda1, you specify the partition as "1". // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk optional int32 partition = 3; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk optional bool readOnly = 4; } @@ -898,16 +898,16 @@ message GitRepoVolumeSource { // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsVolumeSource { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; // Path is the Glusterfs volume path. 
- // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod optional string path = 2; // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod optional bool readOnly = 3; } @@ -962,7 +962,7 @@ message Handler { // Host path volumes do not support ownership management or SELinux relabeling. message HostPathVolumeSource { // Path of the directory on the host. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath optional string path = 1; } @@ -986,7 +986,7 @@ message ISCSIVolumeSource { // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#iscsi + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#iscsi // TODO: how do we prevent errors in the filesystem from compromising the machine optional string fsType = 5; @@ -1020,7 +1020,7 @@ message Lifecycle { // PostStart is called immediately after a container is created. If the handler fails, // the container is terminated and restarted according to its restart policy. // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#hook-details optional Handler postStart = 1; // PreStop is called immediately before a container is terminated. @@ -1028,18 +1028,18 @@ message Lifecycle { // The reason for termination is passed to the handler. // Regardless of the outcome of the handler, the container is eventually terminated. // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#hook-details optional Handler preStop = 2; } // LimitRange sets resource usage limits for each kind of resource in a Namespace. message LimitRange { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Spec defines the limits enforced. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional LimitRangeSpec spec = 2; } @@ -1067,11 +1067,11 @@ message LimitRangeItem { // LimitRangeList is a list of LimitRange items. message LimitRangeList { // Standard list metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is a list of LimitRange objects. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md + // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_limit_range.md repeated LimitRange items = 2; } @@ -1084,7 +1084,7 @@ message LimitRangeSpec { // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // List of objects @@ -1136,7 +1136,7 @@ message LoadBalancerStatus { // referenced object inside the same namespace. message LocalObjectReference { // Name of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names // TODO: Add other useful fields. apiVersion, kind, uid? optional string name = 1; } @@ -1145,17 +1145,17 @@ message LocalObjectReference { // NFS volumes do not support ownership management or SELinux relabeling. message NFSVolumeSource { // Server is the hostname or IP address of the NFS server. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs optional string server = 1; // Path that is exported by the NFS server. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs optional string path = 2; // ReadOnly here will force // the NFS export to be mounted with read-only permissions. // Defaults to false. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs optional bool readOnly = 3; } @@ -1163,40 +1163,40 @@ message NFSVolumeSource { // Use of multiple namespaces is optional. message Namespace { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional NamespaceSpec spec = 2; // Status describes the current status of a Namespace. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional NamespaceStatus status = 3; } // NamespaceList is a list of Namespaces. message NamespaceList { // Standard list metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is the list of Namespace objects in the list. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md repeated Namespace items = 2; } // NamespaceSpec describes the attributes on a Namespace. message NamespaceSpec { // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. - // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers + // More info: http://releases.k8s.io/release-1.4/docs/design/namespaces.md#finalizers repeated string finalizers = 1; } // NamespaceStatus is information about the current status of a Namespace. message NamespaceStatus { // Phase is the current lifecycle phase of the namespace. - // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases + // More info: http://releases.k8s.io/release-1.4/docs/design/namespaces.md#phases optional string phase = 1; } @@ -1204,17 +1204,17 @@ message NamespaceStatus { // Each node will have a unique identifier in the cache (i.e. in etcd). message Node { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Spec defines the behavior of a node. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional NodeSpec spec = 2; // Most recently observed status of the node. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional NodeStatus status = 3; } @@ -1278,7 +1278,7 @@ message NodeDaemonEndpoints { // NodeList is the whole list of all Nodes which have been registered with master. message NodeList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // List of nodes @@ -1336,14 +1336,14 @@ message NodeSpec { optional string providerID = 3; // Unschedulable controls node schedulability of new pods. By default, node is schedulable. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration"` + // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#manual-node-administration"` optional bool unschedulable = 4; } // NodeStatus is information about the current status of a node. message NodeStatus { // Capacity represents the total resources of a node. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity for more details. + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#capacity for more details. map capacity = 1; // Allocatable represents the resources of a node that are available for scheduling. 
@@ -1351,24 +1351,24 @@ message NodeStatus { map allocatable = 2; // NodePhase is the recently observed lifecycle phase of the node. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase + // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-phase // The field is never populated, and now is deprecated. optional string phase = 3; // Conditions is an array of current observed node conditions. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition + // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-condition repeated NodeCondition conditions = 4; // List of addresses reachable to the node. // Queried from cloud provider, if available. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses + // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-addresses repeated NodeAddress addresses = 5; // Endpoints of daemons running on the Node. optional NodeDaemonEndpoints daemonEndpoints = 6; // Set of ids/uuids to uniquely identify the node. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info + // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-info optional NodeSystemInfo nodeInfo = 7; // List of container images on this node @@ -1431,7 +1431,7 @@ message ObjectMeta { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names optional string name = 1; // GenerateName is an optional prefix, used by the server, to generate a unique @@ -1448,7 +1448,7 @@ message ObjectMeta { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#idempotency optional string generateName = 2; // Namespace defines the space within each name must be unique. An empty namespace is @@ -1458,7 +1458,7 @@ message ObjectMeta { // // Must be a DNS_LABEL. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md optional string namespace = 3; // SelfLink is a URL representing this object. @@ -1472,7 +1472,7 @@ message ObjectMeta { // // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids optional string uid = 5; // An opaque value that represents the internal version of this object that can @@ -1484,7 +1484,7 @@ message ObjectMeta { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency optional string resourceVersion = 6; // A sequence number representing a specific generation of the desired state. @@ -1498,7 +1498,7 @@ message ObjectMeta { // Populated by the system. // Read-only. // Null for lists. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.unversioned.Time creationTimestamp = 8; // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This @@ -1514,7 +1514,7 @@ message ObjectMeta { // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.unversioned.Time deletionTimestamp = 9; // Number of seconds allowed for this object to gracefully terminate before @@ -1526,13 +1526,13 @@ message ObjectMeta { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md map labels = 11; // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/annotations.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/annotations.md map annotations = 12; // List of objects depended by this object. If ALL objects in the list have @@ -1556,26 +1556,26 @@ message ObjectMeta { // ObjectReference contains enough information to let you inspect or modify the referred object. message ObjectReference { // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional string kind = 1; // Namespace of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md optional string namespace = 2; // Name of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names optional string name = 3; // UID of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids optional string uid = 4; // API version of the referent. optional string apiVersion = 5; // Specific resourceVersion to which this reference is made, if any. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency optional string resourceVersion = 6; // If referring to a piece of an object instead of an entire object, this string @@ -1597,15 +1597,15 @@ message OwnerReference { optional string apiVersion = 5; // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional string kind = 1; // Name of the referent. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names optional string name = 3; // UID of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids optional string uid = 4; // If true, this reference points to the managing controller. @@ -1614,48 +1614,48 @@ message OwnerReference { // PersistentVolume (PV) is a storage resource provisioned by an administrator. // It is analogous to a node. -// More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md +// More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md message PersistentVolume { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Spec defines a specification of a persistent volume owned by the cluster. // Provisioned by an administrator. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistent-volumes optional PersistentVolumeSpec spec = 2; // Status represents the current information/status for the persistent volume. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistent-volumes optional PersistentVolumeStatus status = 3; } // PersistentVolumeClaim is a user's request for and claim to a persistent volume message PersistentVolumeClaim { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Spec defines the desired characteristics of a volume requested by a pod author. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims optional PersistentVolumeClaimSpec spec = 2; // Status represents the current information/status of a persistent volume claim. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims optional PersistentVolumeClaimStatus status = 3; } // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. message PersistentVolumeClaimList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // A list of persistent volume claims. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims repeated PersistentVolumeClaim items = 2; } @@ -1663,14 +1663,14 @@ message PersistentVolumeClaimList { // and allows a Source for provider-specific attributes message PersistentVolumeClaimSpec { // AccessModes contains the desired access modes the volume should have. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1 + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes-1 repeated string accessModes = 1; // A label query over volumes to consider for binding. optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 4; // Resources represents the minimum resources the volume should have. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#resources optional ResourceRequirements resources = 2; // VolumeName is the binding reference to the PersistentVolume backing this claim. @@ -1683,7 +1683,7 @@ message PersistentVolumeClaimStatus { optional string phase = 1; // AccessModes contains the actual access modes the volume backing the PVC has. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1 + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes-1 repeated string accessModes = 2; // Represents the actual resources of the underlying volume. @@ -1696,7 +1696,7 @@ message PersistentVolumeClaimStatus { // type of volume that is owned by someone else (the system). message PersistentVolumeClaimVolumeSource { // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims optional string claimName = 1; // Will force the ReadOnly setting in VolumeMounts. @@ -1707,11 +1707,11 @@ message PersistentVolumeClaimVolumeSource { // PersistentVolumeList is a list of PersistentVolume items. message PersistentVolumeList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // List of persistent volumes. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md repeated PersistentVolume items = 2; } @@ -1720,32 +1720,32 @@ message PersistentVolumeList { message PersistentVolumeSource { // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1; // AWSElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2; // HostPath represents a directory on the host. // Provisioned by a developer or tester. // This is useful for single-node development and testing only! // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath optional HostPathVolumeSource hostPath = 3; // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md optional GlusterfsVolumeSource glusterfs = 4; // NFS represents an NFS mount on the host. Provisioned by an admin. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs optional NFSVolumeSource nfs = 5; // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md optional RBDVolumeSource rbd = 6; // ISCSI represents an ISCSI Disk resource that is attached to a @@ -1753,7 +1753,7 @@ message PersistentVolumeSource { optional ISCSIVolumeSource iscsi = 7; // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md optional CinderVolumeSource cinder = 8; // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime @@ -1786,33 +1786,33 @@ message PersistentVolumeSource { // PersistentVolumeSpec is the specification of a persistent volume. message PersistentVolumeSpec { // A description of the persistent volume's resources and capacity. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#capacity map capacity = 1; // The actual volume backing the persistent volume. optional PersistentVolumeSource persistentVolumeSource = 2; // AccessModes contains all ways the volume can be mounted. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes repeated string accessModes = 3; // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. // Expected to be non-nil when bound. // claim.VolumeName is the authoritative bind between PV and PVC. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#binding + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#binding optional ObjectReference claimRef = 4; // What happens to a persistent volume when released from its claim. // Valid options are Retain (default) and Recycle. // Recycling must be supported by the volume plugin underlying this persistent volume. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#recycling-policy + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#recycling-policy optional string persistentVolumeReclaimPolicy = 5; } // PersistentVolumeStatus is the current status of a persistent volume. message PersistentVolumeStatus { // Phase indicates if a volume is available, bound to a claim, or released by a claim. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#phase + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#phase optional string phase = 1; // A human-readable message indicating details about why the volume is in this state. @@ -1827,18 +1827,18 @@ message PersistentVolumeStatus { // by clients and scheduled onto hosts. message Pod { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional PodSpec spec = 2; // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional PodStatus status = 3; } @@ -1964,12 +1964,12 @@ message PodAttachOptions { message PodCondition { // Type is the type of the condition. // Currently only Ready. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions optional string type = 1; // Status is the status of the condition. // Can be True, False, Unknown. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions optional string status = 2; // Last time we probed the condition. @@ -2017,11 +2017,11 @@ message PodExecOptions { // PodList is a list of Pods. message PodList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // List of pods. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pods.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pods.md repeated Pod items = 2; } @@ -2121,20 +2121,20 @@ message PodSignature { // PodSpec is a description of a pod. message PodSpec { // List of volumes that can be mounted by containers belonging to the pod. 
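// Illustrative sketch (not vendored content): a PersistentVolume pairs ObjectMeta
// with a PersistentVolumeSpec whose PersistentVolumeSource is embedded inline, as
// the Go struct later in this diff shows. Capacity is omitted to avoid pulling in
// the resource quantity package; the import path assumes the vendored client-go
// 1.4 layout.
package main

import (
	"fmt"

	"k8s.io/client-go/1.4/pkg/api/v1"
)

func main() {
	pv := v1.PersistentVolume{
		ObjectMeta: v1.ObjectMeta{Name: "pv0001"},
		Spec: v1.PersistentVolumeSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{"ReadWriteOnce"},
			// The embedded PersistentVolumeSource holds exactly one backend;
			// HostPath is the single-node testing option called out above.
			PersistentVolumeSource: v1.PersistentVolumeSource{
				HostPath: &v1.HostPathVolumeSource{Path: "/tmp/pv0001"},
			},
			// PersistentVolumeReclaimPolicy is a string-based type; "Retain"
			// is the documented default.
			PersistentVolumeReclaimPolicy: "Retain",
		},
	}
	fmt.Println(pv.Name, pv.Spec.PersistentVolumeReclaimPolicy)
}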
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md repeated Volume volumes = 1; // List of containers belonging to the pod. // Containers cannot currently be added or removed. // There must be at least one container in a Pod. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md repeated Container containers = 2; // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#restartpolicy + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#restartpolicy optional string restartPolicy = 3; // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. @@ -2158,11 +2158,11 @@ message PodSpec { // NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/node-selection/README.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/node-selection/README.md map nodeSelector = 7; // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // More info: http://releases.k8s.io/release-1.4/docs/design/service_accounts.md optional string serviceAccountName = 8; // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. @@ -2198,7 +2198,7 @@ message PodSpec { // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. For example, // in the case of docker, only DockerConfig type secrets are honored. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod repeated LocalObjectReference imagePullSecrets = 15; // Specifies the hostname of the Pod @@ -2214,11 +2214,11 @@ message PodSpec { // state of a system. message PodStatus { // Current condition of the pod. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-phase + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-phase optional string phase = 1; // Current service state of pod. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions repeated PodCondition conditions = 2; // A human readable message indicating details about why the pod is in this condition. @@ -2241,39 +2241,39 @@ message PodStatus { // The list has one entry per container in the manifest. Each entry is currently the output // of `docker inspect`. 
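// Illustrative sketch (not vendored content): the PodSpec fields documented above
// translate directly into the client-go v1 struct. The PodSpec Go type and the
// Container Name/Image fields are assumed from the wider v1 API (their definitions
// fall outside this hunk), and the import path assumes the vendored client-go 1.4
// layout.
package main

import (
	"fmt"

	"k8s.io/client-go/1.4/pkg/api/v1"
)

func main() {
	spec := v1.PodSpec{
		Containers: []v1.Container{
			{Name: "web", Image: "nginx:1.11"}, // assumed fields, see note above
		},
		// RestartPolicy is a string-based type; Always / OnFailure / Never are
		// the documented values, defaulting to Always.
		RestartPolicy: "Always",
		// NodeSelector must match a node's labels for the pod to be scheduled there.
		NodeSelector:       map[string]string{"disktype": "ssd"},
		ServiceAccountName: "default",
	}
	fmt.Println(len(spec.Containers), spec.RestartPolicy)
}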
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-statuses repeated ContainerStatus containerStatuses = 8; } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded message PodStatusResult { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional PodStatus status = 2; } // PodTemplate describes a template for creating copies of a predefined pod. message PodTemplate { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Template defines the pods that will be created from this pod template. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional PodTemplateSpec template = 2; } // PodTemplateList is a list of PodTemplates. message PodTemplateList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // List of pod templates @@ -2283,11 +2283,11 @@ message PodTemplateList { // PodTemplateSpec describes the data a pod should have when created from a template message PodTemplateSpec { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional PodSpec spec = 2; } @@ -2329,12 +2329,12 @@ message Probe { optional Handler handler = 1; // Number of seconds after the container has started before liveness probes are initiated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes optional int32 initialDelaySeconds = 2; // Number of seconds after which the probe times out. // Defaults to 1 second. Minimum value is 1. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes optional int32 timeoutSeconds = 3; // How often (in seconds) to perform the probe. @@ -2378,51 +2378,51 @@ message QuobyteVolumeSource { // RBD volumes support ownership management and SELinux relabeling. message RBDVolumeSource { // A collection of Ceph monitors. 
- // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it repeated string monitors = 1; // The rados image name. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it optional string image = 2; // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#rbd + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#rbd // TODO: how do we prevent errors in the filesystem from compromising the machine optional string fsType = 3; // The rados pool name. // Default is rbd. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it. + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it. optional string pool = 4; // The rados user name. // Default is admin. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it optional string user = 5; // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it optional string keyring = 6; // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it optional LocalObjectReference secretRef = 7; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it optional bool readOnly = 8; } // RangeAllocation is not a public type. message RangeAllocation { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Range is string that identifies the range represented by 'data'. @@ -2436,29 +2436,29 @@ message RangeAllocation { message ReplicationController { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the replication controller. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional ReplicationControllerSpec spec = 2; // Status is the most recently observed status of the replication controller. // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional ReplicationControllerStatus status = 3; } // ReplicationControllerList is a collection of replication controllers. message ReplicationControllerList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // List of replication controllers. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md repeated ReplicationController items = 2; } @@ -2467,19 +2467,19 @@ message ReplicationControllerSpec { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller optional int32 replicas = 1; // Selector is a label query over pods that should match the Replicas count. // If Selector is empty, it is defaulted to the labels present on the Pod template. // Label keys and values that must match in order to be controlled by this replication // controller, if empty defaulted to labels on Pod template. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors map selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template optional PodTemplateSpec template = 3; } @@ -2487,7 +2487,7 @@ message ReplicationControllerSpec { // controller. message ReplicationControllerStatus { // Replicas is the most recently oberved number of replicas. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller optional int32 replicas = 1; // The number of pods that have labels matching the labels of the pod template of the replication controller. @@ -2515,33 +2515,33 @@ message ResourceFieldSelector { // ResourceQuota sets aggregate quota restrictions enforced per namespace message ResourceQuota { // Standard object's metadata. 
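// Illustrative sketch (not vendored content): a ReplicationController ties Replicas
// (a pointer, so an explicit zero and "unset" can be told apart, per the comment
// above), a label Selector and a pod Template together. The Go type names are
// assumed to mirror the proto messages above; Template being a pointer and the
// Container Name/Image fields are assumptions about the wider v1 API, and the
// import path assumes the vendored client-go 1.4 layout.
package main

import (
	"fmt"

	"k8s.io/client-go/1.4/pkg/api/v1"
)

func main() {
	replicas := int32(3)
	labels := map[string]string{"app": "web"}

	rc := v1.ReplicationController{
		ObjectMeta: v1.ObjectMeta{Name: "web", Labels: labels},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &replicas,
			Selector: labels,
			Template: &v1.PodTemplateSpec{
				// Template labels must match the selector above.
				ObjectMeta: v1.ObjectMeta{Labels: labels},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{Name: "web", Image: "nginx:1.11"}},
				},
			},
		},
	}
	fmt.Println(rc.Name, *rc.Spec.Replicas)
}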
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Spec defines the desired quota. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional ResourceQuotaSpec spec = 2; // Status defines the actual enforced quota and its current usage. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional ResourceQuotaStatus status = 3; } // ResourceQuotaList is a list of ResourceQuota items. message ResourceQuotaList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is a list of ResourceQuota objects. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota repeated ResourceQuota items = 2; } // ResourceQuotaSpec defines the desired hard limits to enforce for Quota. message ResourceQuotaSpec { // Hard is the set of desired hard limits for each named resource. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota map hard = 1; // A collection of filters that must match each object tracked by a quota. @@ -2552,7 +2552,7 @@ message ResourceQuotaSpec { // ResourceQuotaStatus defines the enforced hard limits and observed use. message ResourceQuotaStatus { // Hard is the set of enforced hard limits for each named resource. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota map hard = 1; // Used is the current observed total usage of the resource in the namespace. @@ -2591,7 +2591,7 @@ message SELinuxOptions { // the Data field must be less than MaxSecretSize bytes. message Secret { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN @@ -2624,11 +2624,11 @@ message SecretKeySelector { // SecretList is a list of Secret. message SecretList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is a list of secret objects. 
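// Illustrative sketch (not vendored content): ResourceQuotaSpec.Hard is a
// ResourceList, i.e. a map keyed by ResourceName with Quantity values. The Go type
// names are assumed to mirror the proto messages above; the resource package
// import path and the MustParse helper are assumptions about the vendored
// client-go 1.4 layout, and only the field names come from this diff.
package main

import (
	"fmt"

	"k8s.io/client-go/1.4/pkg/api/resource" // assumed vendored path
	"k8s.io/client-go/1.4/pkg/api/v1"
)

func main() {
	quota := v1.ResourceQuota{
		ObjectMeta: v1.ObjectMeta{Name: "team-quota", Namespace: "team-a"},
		Spec: v1.ResourceQuotaSpec{
			// Hard is the set of desired hard limits for each named resource.
			Hard: v1.ResourceList{
				v1.ResourceName("pods"): resource.MustParse("10"),
				v1.ResourceName("cpu"):  resource.MustParse("4"),
			},
		},
	}
	fmt.Println(quota.Name, len(quota.Spec.Hard))
}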
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md repeated Secret items = 2; } @@ -2639,7 +2639,7 @@ message SecretList { // Secret volumes support ownership management and SELinux relabeling. message SecretVolumeSource { // Name of the secret in the pod's namespace to use. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#secrets optional string secretName = 1; // If unspecified, each key-value pair in the Data field of the referenced @@ -2708,17 +2708,17 @@ message SerializedReference { // will answer requests sent through the proxy. message Service { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Spec defines the behavior of a service. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional ServiceSpec spec = 2; // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional ServiceStatus status = 3; } @@ -2728,35 +2728,35 @@ message Service { // * a set of secrets message ServiceAccount { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional ObjectMeta metadata = 1; // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md repeated ObjectReference secrets = 2; // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret repeated LocalObjectReference imagePullSecrets = 3; } // ServiceAccountList is a list of ServiceAccount objects message ServiceAccountList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // List of ServiceAccounts. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts + // More info: http://releases.k8s.io/release-1.4/docs/design/service_accounts.md#service-accounts repeated ServiceAccount items = 2; } // ServiceList holds a list of services. message ServiceList { // Standard list metadata. 
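// Illustrative sketch (not vendored content): a Secret's Data map (assumed to be
// map[string][]byte; only its doc comment appears in this hunk) can be surfaced to
// a pod through a Volume whose VolumeSource sets Secret.SecretName. The Secret and
// SecretVolumeSource Go field names are assumed to mirror the proto above, and the
// import path assumes the vendored client-go 1.4 layout.
package main

import (
	"fmt"

	"k8s.io/client-go/1.4/pkg/api/v1"
)

func main() {
	secret := v1.Secret{
		ObjectMeta: v1.ObjectMeta{Name: "db-credentials", Namespace: "default"},
		Data:       map[string][]byte{"password": []byte("s3cr3t")},
	}

	// Mount the secret into a pod by name; the kubelet projects each key/value
	// pair of the secret as a file inside the volume.
	vol := v1.Volume{
		Name: "credentials",
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{SecretName: secret.Name},
		},
	}
	fmt.Println(vol.Name, vol.VolumeSource.Secret.SecretName)
}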
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // List of services @@ -2785,14 +2785,14 @@ message ServicePort { // of the 'port' field is used (an identity map). // This field is ignored for services with clusterIP=None, and should be // omitted or set equal to the 'port' field. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#defining-a-service + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#defining-a-service optional k8s.io.kubernetes.pkg.util.intstr.IntOrString targetPort = 4; // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. // Usually assigned by the system. If specified, it will be allocated to the service // if unused or else creation of the service will fail. // Default is to auto-allocate a port if the ServiceType of this Service requires one. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#type--nodeport + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#type--nodeport optional int32 nodePort = 5; } @@ -2809,7 +2809,7 @@ message ServiceProxyOptions { // ServiceSpec describes the attributes that a user creates on a service. message ServiceSpec { // The list of ports that are exposed by this service. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies repeated ServicePort ports = 1; // Route service traffic to pods with label keys and values matching this @@ -2817,7 +2817,7 @@ message ServiceSpec { // external process managing its endpoints, which Kubernetes will not // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. // Ignored if type is ExternalName. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#overview + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#overview map selector = 2; // clusterIP is the IP address of the service and is usually assigned @@ -2828,7 +2828,7 @@ message ServiceSpec { // can be specified for headless services when proxying is not required. // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if // type is ExternalName. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies optional string clusterIP = 3; // type determines how the Service is exposed. Defaults to ClusterIP. Valid @@ -2844,7 +2844,7 @@ message ServiceSpec { // "LoadBalancer" builds on NodePort and creates an // external load-balancer (if supported in the current cloud) which routes // to the clusterIP. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#overview + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#overview optional string type = 4; // externalIPs is a list of IP addresses for which nodes in the cluster @@ -2868,7 +2868,7 @@ message ServiceSpec { // Enable client IP based session affinity. // Must be ClientIP or None. // Defaults to None. 
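// Illustrative sketch (not vendored content): ServiceSpec wires a label selector to
// a list of ServicePorts; TargetPort is an intstr.IntOrString as the proto above
// shows. The Service Go type name, the ServicePort Port field name and the intstr
// import path are assumptions about the vendored client-go 1.4 layout; the
// remaining field names come from this diff.
package main

import (
	"fmt"

	"k8s.io/client-go/1.4/pkg/api/v1"
	"k8s.io/client-go/1.4/pkg/util/intstr" // assumed vendored path
)

func main() {
	svc := v1.Service{
		ObjectMeta: v1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: v1.ServiceSpec{
			// Route traffic to pods whose labels match this selector.
			Selector: map[string]string{"app": "web"},
			Ports: []v1.ServicePort{
				{
					Port:       80,                   // port exposed by the service
					TargetPort: intstr.FromInt(8080), // port on the backing pods
				},
			},
			// Type and SessionAffinity are string-based types; these are the
			// documented defaults.
			Type:            "ClusterIP",
			SessionAffinity: "None",
		},
	}
	fmt.Println(svc.Name, svc.Spec.Ports[0].Port)
}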
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies optional string sessionAffinity = 7; // Only applies to Service Type: LoadBalancer @@ -2881,7 +2881,7 @@ message ServiceSpec { // If specified and supported by the platform, this will restrict traffic through the cloud-provider // load-balancer will be restricted to the specified client IPs. This field will be ignored if the // cloud-provider does not support the feature." - // More info: http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services-firewalls.md repeated string loadBalancerSourceRanges = 9; // externalName is the external reference that kubedns or equivalent will @@ -2945,7 +2945,7 @@ message Toleration { message Volume { // Volume's name. // Must be a DNS_LABEL and unique within the pod. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names optional string name = 1; // VolumeSource represents the location and type of the mounted volume. @@ -2979,53 +2979,53 @@ message VolumeSource { // machine that is directly exposed to the container. This is generally // used for system agents or other privileged things that are allowed // to see the host machine. Most containers will NOT need this. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath // --- // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not // mount host directories as read/write. optional HostPathVolumeSource hostPath = 1; // EmptyDir represents a temporary directory that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#emptydir optional EmptyDirVolumeSource emptyDir = 2; // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3; // AWSElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; // GitRepo represents a git repository at a particular revision. optional GitRepoVolumeSource gitRepo = 5; // Secret represents a secret that should populate this volume. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#secrets optional SecretVolumeSource secret = 6; // NFS represents an NFS mount on the host that shares a pod's lifetime - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs optional NFSVolumeSource nfs = 7; // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // More info: http://releases.k8s.io/release-1.4/examples/volumes/iscsi/README.md optional ISCSIVolumeSource iscsi = 8; // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md optional GlusterfsVolumeSource glusterfs = 9; // PersistentVolumeClaimVolumeSource represents a reference to a // PersistentVolumeClaim in the same namespace. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md optional RBDVolumeSource rbd = 11; // FlexVolume represents a generic volume resource that is @@ -3034,7 +3034,7 @@ message VolumeSource { optional FlexVolumeSource flexVolume = 12; // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md optional CinderVolumeSource cinder = 13; // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/v1/types.go b/vendor/k8s.io/client-go/1.4/pkg/api/v1/types.go index 6ec8a64b529b..2213f4b490f3 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/api/v1/types.go +++ b/vendor/k8s.io/client-go/1.4/pkg/api/v1/types.go @@ -71,7 +71,7 @@ type ObjectMeta struct { // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // GenerateName is an optional prefix, used by the server, to generate a unique @@ -88,7 +88,7 @@ type ObjectMeta struct { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#idempotency GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` // Namespace defines the space within each name must be unique. 
An empty namespace is @@ -98,7 +98,7 @@ type ObjectMeta struct { // // Must be a DNS_LABEL. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` // SelfLink is a URL representing this object. @@ -112,7 +112,7 @@ type ObjectMeta struct { // // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` // An opaque value that represents the internal version of this object that can @@ -124,7 +124,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` // A sequence number representing a specific generation of the desired state. @@ -138,7 +138,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Null for lists. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata CreationTimestamp unversioned.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This @@ -154,7 +154,7 @@ type ObjectMeta struct { // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata DeletionTimestamp *unversioned.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` // Number of seconds allowed for this object to gracefully terminate before @@ -166,13 +166,13 @@ type ObjectMeta struct { // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/annotations.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/annotations.md Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` // List of objects depended by this object. If ALL objects in the list have @@ -204,7 +204,7 @@ const ( type Volume struct { // Volume's name. // Must be a DNS_LABEL and unique within the pod. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // VolumeSource represents the location and type of the mounted volume. // If not specified, the Volume is implied to be an EmptyDir. @@ -219,50 +219,50 @@ type VolumeSource struct { // machine that is directly exposed to the container. This is generally // used for system agents or other privileged things that are allowed // to see the host machine. Most containers will NOT need this. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath // --- // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not // mount host directories as read/write. HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"` // EmptyDir represents a temporary directory that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#emptydir EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"` // AWSElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` // GitRepo represents a git repository at a particular revision. GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"` // Secret represents a secret that should populate this volume. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#secrets Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"` // NFS represents an NFS mount on the host that shares a pod's lifetime - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md + // More info: http://releases.k8s.io/release-1.4/examples/volumes/iscsi/README.md ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. 
- // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` // PersistentVolumeClaimVolumeSource represents a reference to a // PersistentVolumeClaim in the same namespace. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. This is an // alpha feature and may change in future. FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"` @@ -290,7 +290,7 @@ type VolumeSource struct { // type of volume that is owned by someone else (the system). type PersistentVolumeClaimVolumeSource struct { // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"` // Will force the ReadOnly setting in VolumeMounts. // Default false. @@ -302,33 +302,33 @@ type PersistentVolumeClaimVolumeSource struct { type PersistentVolumeSource struct { // GCEPersistentDisk represents a GCE Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"` // AWSElasticBlockStore represents an AWS Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"` // HostPath represents a directory on the host. 
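// Illustrative sketch (not vendored content): at most one of the pointer fields in
// VolumeSource is normally set. Here a pod consumes an existing claim through
// PersistentVolumeClaimVolumeSource, whose Go fields appear in this hunk; the
// import path assumes the vendored client-go 1.4 layout.
package main

import (
	"fmt"

	"k8s.io/client-go/1.4/pkg/api/v1"
)

func main() {
	vol := v1.Volume{
		Name: "data", // must be a DNS_LABEL and unique within the pod
		VolumeSource: v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: "data-claim", // claim in the same namespace as the pod
				ReadOnly:  false,
			},
		},
	}
	fmt.Println(vol.Name, vol.VolumeSource.PersistentVolumeClaim.ClaimName)
}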
// Provisioned by a developer or tester. // This is useful for single-node development and testing only! // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md + // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` // NFS represents an NFS mount on the host. Provisioned by an admin. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"` @@ -355,44 +355,44 @@ type PersistentVolumeSource struct { // PersistentVolume (PV) is a storage resource provisioned by an administrator. // It is analogous to a node. -// More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md +// More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md type PersistentVolume struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines a specification of a persistent volume owned by the cluster. // Provisioned by an administrator. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistent-volumes Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status represents the current information/status for the persistent volume. // Populated by the system. // Read-only. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistent-volumes Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // PersistentVolumeSpec is the specification of a persistent volume. type PersistentVolumeSpec struct { // A description of the persistent volume's resources and capacity. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#capacity Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` // The actual volume backing the persistent volume. PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"` // AccessModes contains all ways the volume can be mounted. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. // Expected to be non-nil when bound. // claim.VolumeName is the authoritative bind between PV and PVC. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#binding + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#binding ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"` // What happens to a persistent volume when released from its claim. // Valid options are Retain (default) and Recycle. // Recycling must be supported by the volume plugin underlying this persistent volume. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#recycling-policy + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#recycling-policy PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"` } @@ -414,7 +414,7 @@ const ( // PersistentVolumeStatus is the current status of a persistent volume. type PersistentVolumeStatus struct { // Phase indicates if a volume is available, bound to a claim, or released by a claim. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#phase + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#phase Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"` // A human-readable message indicating details about why the volume is in this state. Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` @@ -427,10 +427,10 @@ type PersistentVolumeStatus struct { type PersistentVolumeList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of persistent volumes. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -440,16 +440,16 @@ type PersistentVolumeList struct { type PersistentVolumeClaim struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired characteristics of a volume requested by a pod author. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status represents the current information/status of a persistent volume claim. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -457,10 +457,10 @@ type PersistentVolumeClaim struct { type PersistentVolumeClaimList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // A list of persistent volume claims. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -468,12 +468,12 @@ type PersistentVolumeClaimList struct { // and allows a Source for provider-specific attributes type PersistentVolumeClaimSpec struct { // AccessModes contains the desired access modes the volume should have. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1 + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes-1 AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` // A label query over volumes to consider for binding. Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` // Resources represents the minimum resources the volume should have. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#resources Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` // VolumeName is the binding reference to the PersistentVolume backing this claim. VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` @@ -484,7 +484,7 @@ type PersistentVolumeClaimStatus struct { // Phase represents the current phase of PersistentVolumeClaim. Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"` // AccessModes contains the actual access modes the volume backing the PVC has. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1 + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes-1 AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` // Represents the actual resources of the underlying volume. Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` @@ -536,7 +536,7 @@ const ( // Host path volumes do not support ownership management or SELinux relabeling. type HostPathVolumeSource struct { // Path of the directory on the host. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath Path string `json:"path" protobuf:"bytes,1,opt,name=path"` } @@ -546,7 +546,7 @@ type EmptyDirVolumeSource struct { // What type of storage medium should back this directory. // The default is "" which means to use the node's default medium. // Must be an empty string (default) or Memory. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#emptydir Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"` } @@ -554,16 +554,16 @@ type EmptyDirVolumeSource struct { // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` // Path is the Glusterfs volume path. - // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. 
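The claim-side fields referenced in these comments (accessModes, resources, volumeName) come together in a single PersistentVolumeClaim object. A minimal sketch, assuming the 1.4-era vendor paths k8s.io/kubernetes/pkg/api/v1 and k8s.io/kubernetes/pkg/api/resource plus the ReadWriteOnce, ResourceStorage, and Requests names from those packages, which are not shown in these hunks:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource" // assumed 1.4 vendor path
	"k8s.io/kubernetes/pkg/api/v1"       // assumed 1.4 vendor path
)

func main() {
	// A claim asking for 1Gi of storage with single-node read/write access,
	// using the PersistentVolumeClaimSpec fields documented above.
	claim := v1.PersistentVolumeClaim{
		ObjectMeta: v1.ObjectMeta{Name: "my-claim"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
	}
	fmt.Printf("claim %s requests %v\n", claim.Name, claim.Spec.Resources.Requests)
}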
- // More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } @@ -571,37 +571,37 @@ type GlusterfsVolumeSource struct { // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { // A collection of Ceph monitors. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // The rados image name. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#rbd + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#rbd // TODO: how do we prevent errors in the filesystem from compromising the machine FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // The rados pool name. // Default is rbd. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it. + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it. RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // The rados user name. // Default is admin. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } @@ -611,16 +611,16 @@ type RBDVolumeSource struct { // Cinder volumes support ownership management and SELinux relabeling. 
type CinderVolumeSource struct { // volume id used to identify the volume in cinder - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md + // More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } @@ -628,22 +628,22 @@ type CinderVolumeSource struct { // Cephfs volumes do not support ownership management or SELinux relabeling. type CephFSVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // Optional: User is the rados user name, default is admin - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it + // More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } @@ -680,23 +680,23 @@ const ( // PDs support ownership management and SELinux relabeling. type GCEPersistentDiskVolumeSource struct { // Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk // TODO: how do we prevent errors in the filesystem from compromising the machine FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // The partition in the volume that you want to mount. // If omitted, the default is to mount by volume name. // Examples: For volume /dev/sda1, you specify the partition as "1". // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` } @@ -754,12 +754,12 @@ type FlexVolumeSource struct { // ownership management and SELinux relabeling. type AWSElasticBlockStoreVolumeSource struct { // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore // TODO: how do we prevent errors in the filesystem from compromising the machine FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // The partition in the volume that you want to mount. @@ -769,7 +769,7 @@ type AWSElasticBlockStoreVolumeSource struct { Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". // If omitted, the default is "false". - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` } @@ -795,7 +795,7 @@ type GitRepoVolumeSource struct { // Secret volumes support ownership management and SELinux relabeling. 
type SecretVolumeSource struct { // Name of the secret in the pod's namespace to use. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#secrets SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"` // If unspecified, each key-value pair in the Data field of the referenced // Secret will be projected into the volume as a file whose name is the @@ -821,17 +821,17 @@ const ( // NFS volumes do not support ownership management or SELinux relabeling. type NFSVolumeSource struct { // Server is the hostname or IP address of the NFS server. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs Server string `json:"server" protobuf:"bytes,1,opt,name=server"` // Path that is exported by the NFS server. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force // the NFS export to be mounted with read-only permissions. // Defaults to false. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` } @@ -851,7 +851,7 @@ type ISCSIVolumeSource struct { // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#iscsi + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#iscsi // TODO: how do we prevent errors in the filesystem from compromising the machine FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. @@ -1131,11 +1131,11 @@ type Probe struct { // The action taken to determine the health of a container Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"` // Number of seconds after the container has started before liveness probes are initiated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"` // Number of seconds after which the probe times out. // Defaults to 1 second. Minimum value is 1. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` // How often (in seconds) to perform the probe. // Default to 10 seconds. Minimum value is 1. @@ -1195,7 +1195,7 @@ type Container struct { // Cannot be updated. Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Docker image name. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` // Entrypoint array. Not executed within a shell. // The docker image's ENTRYPOINT is used if this is not provided. @@ -1204,7 +1204,7 @@ type Container struct { // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md#containers-and-commands Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` // Arguments to the entrypoint. // The docker image's CMD is used if this is not provided. @@ -1213,7 +1213,7 @@ type Container struct { // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, // regardless of whether the variable exists or not. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md#containers-and-commands Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` // Container's working directory. // If not specified, the container runtime's default will be used, which @@ -1233,7 +1233,7 @@ type Container struct { Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` // Compute Resources required by this container. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#resources Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` // Pod volumes to mount into the container's filesystem. // Cannot be updated. @@ -1241,12 +1241,12 @@ type Container struct { // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` // Periodic probe of container service readiness. // Container will be removed from service endpoints if the probe fails. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. @@ -1261,10 +1261,10 @@ type Container struct { // One of Always, Never, IfNotPresent. // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. // Cannot be updated. 
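The container fields above (image, command, args, livenessProbe, readinessProbe, imagePullPolicy) can be read as one struct literal. A hedged sketch, assuming the same v1 package along with its ExecAction handler and PullIfNotPresent constant, neither of which appears in these hunks:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1" // assumed 1.4 vendor path
)

func main() {
	c := v1.Container{
		Name:    "web",
		Image:   "nginx:1.11",
		Command: []string{"nginx"},
		Args:    []string{"-g", "daemon off;"},
		// Probe embeds the Handler and adds the timing fields documented above.
		LivenessProbe: &v1.Probe{
			Handler: v1.Handler{
				Exec: &v1.ExecAction{Command: []string{"cat", "/tmp/healthy"}},
			},
			InitialDelaySeconds: 5,
			TimeoutSeconds:      1,
		},
		ImagePullPolicy: v1.PullIfNotPresent,
	}
	fmt.Println(c.Name, c.Image, c.LivenessProbe.InitialDelaySeconds)
}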
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#updating-images + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md#updating-images ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` // Security options the pod should run with. - // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md + // More info: http://releases.k8s.io/release-1.4/docs/design/security_context.md SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) @@ -1308,14 +1308,14 @@ type Lifecycle struct { // PostStart is called immediately after a container is created. If the handler fails, // the container is terminated and restarted according to its restart policy. // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#hook-details PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` // PreStop is called immediately before a container is terminated. // The container is terminated after the handler completes. // The reason for termination is passed to the handler. // Regardless of the outcome of the handler, the container is eventually terminated. // Other management of the container blocks until the hook completes. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#hook-details PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"` } @@ -1392,13 +1392,13 @@ type ContainerStatus struct { // garbage collection. This value will get capped at 5 by GC. RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` // The image the container is running. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md // TODO(dchen1107): Which image the container is running with? Image string `json:"image" protobuf:"bytes,6,opt,name=image"` // ImageID of the container's image. ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` // Container's ID in the format 'docker://'. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#container-information + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#container-information ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` } @@ -1441,11 +1441,11 @@ const ( type PodCondition struct { // Type is the type of the condition. // Currently only Ready. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"` // Status is the status of the condition. // Can be True, False, Unknown. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` // Last time we probed the condition. LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` @@ -1749,14 +1749,19 @@ const ( PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers" // This annotation key will be used to contain an array of v1 JSON encoded // ContainerStatuses for init containers. The annotation will be placed into the internal - // type and cleared. - PodInitContainerStatusesAnnotationKey = "pod.beta.kubernetes.io/init-container-statuses" + // type and cleared. This key is only recognized by version >= 1.4. + PodInitContainerStatusesBetaAnnotationKey = "pod.beta.kubernetes.io/init-container-statuses" + // This annotation key will be used to contain an array of v1 JSON encoded + // ContainerStatuses for init containers. The annotation will be placed into the internal + // type and cleared. This key is recognized by version >= 1.3. For version 1.4 code, + // this key will have its value copied to the beta key. + PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses" ) // PodSpec is a description of a pod. type PodSpec struct { // List of volumes that can be mounted by containers belonging to the pod. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` // List of initialization containers belonging to the pod. // Init containers are executed in order prior to containers being started. If any @@ -1771,18 +1776,18 @@ type PodSpec struct { // Init containers cannot currently be added or removed. // Init containers are in alpha state and may change without notice. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md InitContainers []Container `json:"-" patchStrategy:"merge" patchMergeKey:"name"` // List of containers belonging to the pod. // Containers cannot currently be added or removed. // There must be at least one container in a Pod. // Cannot be updated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#restartpolicy + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#restartpolicy RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"` // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. // Value must be non-negative integer. The value zero indicates delete immediately. 
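The annotation-key change above is the one behavioral edit in this region: 1.4 writes init-container statuses under the new beta key while copying the value from the alpha key that 1.3 components recognize. A reader-side sketch of that fallback, assuming the v1 vendor path; the sample annotation value in main is illustrative only:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1" // assumed 1.4 vendor path
)

// initContainerStatuses decodes the JSON-encoded init-container statuses from
// a pod's annotations, preferring the beta key written by 1.4 and falling back
// to the alpha key used by 1.3 components.
func initContainerStatuses(annotations map[string]string) ([]v1.ContainerStatus, error) {
	value, ok := annotations[v1.PodInitContainerStatusesBetaAnnotationKey]
	if !ok {
		value, ok = annotations[v1.PodInitContainerStatusesAnnotationKey]
	}
	if !ok {
		return nil, nil
	}
	var statuses []v1.ContainerStatus
	if err := json.Unmarshal([]byte(value), &statuses); err != nil {
		return nil, err
	}
	return statuses, nil
}

func main() {
	// Illustrative annotation value only; real pods carry the full status JSON.
	statuses, err := initContainerStatuses(map[string]string{
		v1.PodInitContainerStatusesAnnotationKey: `[{"name":"init","image":"busybox","ready":true,"restartCount":0}]`,
	})
	fmt.Println(len(statuses), err)
}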
@@ -1802,11 +1807,11 @@ type PodSpec struct { DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` // NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/node-selection/README.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/node-selection/README.md NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md + // More info: http://releases.k8s.io/release-1.4/docs/design/service_accounts.md ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. // Deprecated: Use serviceAccountName instead. @@ -1836,7 +1841,7 @@ type PodSpec struct { // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. For example, // in the case of docker, only DockerConfig type secrets are honored. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` // Specifies the hostname of the Pod // If not specified, the pod's hostname will be set to a system-defined value. @@ -1889,10 +1894,10 @@ type PodSecurityContext struct { // state of a system. type PodStatus struct { // Current condition of the pod. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-phase + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-phase Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"` // Current service state of pod. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` // A human readable message indicating details about why the pod is in this condition. Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` @@ -1914,11 +1919,11 @@ type PodStatus struct { // init container will have ready = true, the most recently started container will have // startTime set. // Init containers are in alpha state and may change without notice. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-statuses InitContainerStatuses []ContainerStatus `json:"-"` // The list has one entry per container in the manifest. Each entry is currently the output // of `docker inspect`. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-statuses ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` } @@ -1926,13 +1931,13 @@ type PodStatus struct { type PodStatusResult struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` } @@ -1943,18 +1948,18 @@ type PodStatusResult struct { type Pod struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -1962,22 +1967,22 @@ type Pod struct { type PodList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of pods. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/pods.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/pods.md Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` } // PodTemplateSpec describes the data a pod should have when created from a template type PodTemplateSpec struct { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -1987,11 +1992,11 @@ type PodTemplateSpec struct { type PodTemplate struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Template defines the pods that will be created from this pod template. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -1999,7 +2004,7 @@ type PodTemplate struct { type PodTemplateList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of pod templates @@ -2011,14 +2016,14 @@ type ReplicationControllerSpec struct { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` // Selector is a label query over pods that should match the Replicas count. // If Selector is empty, it is defaulted to the labels present on the Pod template. // Label keys and values that must match in order to be controlled by this replication // controller, if empty defaulted to labels on Pod template. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` // TemplateRef is a reference to an object that describes the pod that will be created if @@ -2028,7 +2033,7 @@ type ReplicationControllerSpec struct { // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a TemplateRef. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` } @@ -2036,7 +2041,7 @@ type ReplicationControllerSpec struct { // controller. type ReplicationControllerStatus struct { // Replicas is the most recently oberved number of replicas. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` // The number of pods that have labels matching the labels of the pod template of the replication controller. @@ -2057,18 +2062,18 @@ type ReplicationController struct { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the replication controller. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the most recently observed status of the replication controller. // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -2076,11 +2081,11 @@ type ReplicationController struct { type ReplicationControllerList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of replication controllers. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -2147,7 +2152,7 @@ type LoadBalancerIngress struct { // ServiceSpec describes the attributes that a user creates on a service. type ServiceSpec struct { // The list of ports that are exposed by this service. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies Ports []ServicePort `json:"ports" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` // Route service traffic to pods with label keys and values matching this @@ -2155,7 +2160,7 @@ type ServiceSpec struct { // external process managing its endpoints, which Kubernetes will not // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. // Ignored if type is ExternalName. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#overview + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#overview Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` // clusterIP is the IP address of the service and is usually assigned @@ -2166,7 +2171,7 @@ type ServiceSpec struct { // can be specified for headless services when proxying is not required. // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if // type is ExternalName. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"` // type determines how the Service is exposed. Defaults to ClusterIP. Valid @@ -2182,7 +2187,7 @@ type ServiceSpec struct { // "LoadBalancer" builds on NodePort and creates an // external load-balancer (if supported in the current cloud) which routes // to the clusterIP. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#overview + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#overview Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` // externalIPs is a list of IP addresses for which nodes in the cluster @@ -2206,7 +2211,7 @@ type ServiceSpec struct { // Enable client IP based session affinity. // Must be ClientIP or None. // Defaults to None. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"` // Only applies to Service Type: LoadBalancer @@ -2219,7 +2224,7 @@ type ServiceSpec struct { // If specified and supported by the platform, this will restrict traffic through the cloud-provider // load-balancer will be restricted to the specified client IPs. This field will be ignored if the // cloud-provider does not support the feature." - // More info: http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services-firewalls.md LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` // externalName is the external reference that kubedns or equivalent will @@ -2250,14 +2255,14 @@ type ServicePort struct { // of the 'port' field is used (an identity map). // This field is ignored for services with clusterIP=None, and should be // omitted or set equal to the 'port' field. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#defining-a-service + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#defining-a-service TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"` // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. // Usually assigned by the system. If specified, it will be allocated to the service // if unused or else creation of the service will fail. // Default is to auto-allocate a port if the ServiceType of this Service requires one. 
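The service fields described here (selector, type, ports with targetPort and nodePort, sessionAffinity) compose as follows. A sketch, assuming the 1.4-era v1 and intstr packages and the ServiceTypeNodePort and ServiceAffinityClientIP constants plus the ServicePort Port field, none of which appear verbatim in these hunks:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"      // assumed 1.4 vendor path
	"k8s.io/kubernetes/pkg/util/intstr" // assumed 1.4 vendor path
)

func main() {
	svc := v1.Service{
		ObjectMeta: v1.ObjectMeta{Name: "web"},
		Spec: v1.ServiceSpec{
			// Traffic goes to pods whose labels match this selector.
			Selector: map[string]string{"app": "web"},
			Type:     v1.ServiceTypeNodePort,
			Ports: []v1.ServicePort{{
				Port:       80,                   // port exposed by the service
				TargetPort: intstr.FromInt(8080), // port on the backing pods
				NodePort:   30080,                // omit to have one auto-allocated
			}},
			SessionAffinity: v1.ServiceAffinityClientIP,
		},
	}
	fmt.Println(svc.Name, svc.Spec.Type, svc.Spec.Ports[0].NodePort)
}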
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#type--nodeport + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#type--nodeport NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"` } @@ -2269,17 +2274,17 @@ type ServicePort struct { type Service struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a service. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -2293,7 +2298,7 @@ const ( type ServiceList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of services @@ -2309,17 +2314,17 @@ type ServiceList struct { type ServiceAccount struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"` // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"` } @@ -2327,11 +2332,11 @@ type ServiceAccount struct { type ServiceAccountList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ServiceAccounts. - // More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts + // More info: http://releases.k8s.io/release-1.4/docs/design/service_accounts.md#service-accounts Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -2352,7 +2357,7 @@ type ServiceAccountList struct { type Endpoints struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The set of all endpoints is the union of all subsets. Addresses are placed into @@ -2424,7 +2429,7 @@ type EndpointPort struct { type EndpointsList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of endpoints. @@ -2441,7 +2446,7 @@ type NodeSpec struct { // ID of the node assigned by the cloud provider in the format: :// ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` // Unschedulable controls node schedulability of new pods. By default, node is schedulable. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration"` + // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#manual-node-administration"` Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"` } @@ -2490,26 +2495,26 @@ type NodeSystemInfo struct { // NodeStatus is information about the current status of a node. type NodeStatus struct { // Capacity represents the total resources of a node. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity for more details. + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#capacity for more details. Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` // Allocatable represents the resources of a node that are available for scheduling. // Defaults to Capacity. Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"` // NodePhase is the recently observed lifecycle phase of the node. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase + // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-phase // The field is never populated, and now is deprecated. Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"` // Conditions is an array of current observed node conditions. 
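Capacity and Allocatable above are both ResourceList maps keyed by resource name, with Allocatable defaulting to Capacity when left unset. A sketch, assuming the ResourceCPU and ResourceMemory keys and the resource package's MustParse helper, which sit outside these hunks:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource" // assumed 1.4 vendor path
	"k8s.io/kubernetes/pkg/api/v1"       // assumed 1.4 vendor path
)

func main() {
	status := v1.NodeStatus{
		// Total resources reported by the node.
		Capacity: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("4"),
			v1.ResourceMemory: resource.MustParse("8Gi"),
		},
		// Share of capacity left for scheduling; defaults to Capacity if unset.
		Allocatable: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("3800m"),
			v1.ResourceMemory: resource.MustParse("7Gi"),
		},
	}
	cpu := status.Allocatable[v1.ResourceCPU]
	fmt.Println("allocatable cpu:", cpu.String())
}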
- // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition + // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-condition Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` // List of addresses reachable to the node. // Queried from cloud provider, if available. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses + // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-addresses Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"` // Endpoints of daemons running on the Node. DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"` // Set of ids/uuids to uniquely identify the node. - // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info + // More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-info NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"` // List of container images on this node Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"` @@ -2662,17 +2667,17 @@ type ResourceList map[ResourceName]resource.Quantity type Node struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a node. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the node. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -2680,7 +2685,7 @@ type Node struct { type NodeList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of nodes @@ -2697,14 +2702,14 @@ const ( // NamespaceSpec describes the attributes on a Namespace. type NamespaceSpec struct { // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. - // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers + // More info: http://releases.k8s.io/release-1.4/docs/design/namespaces.md#finalizers Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"` } // NamespaceStatus is information about the current status of a Namespace. type NamespaceStatus struct { // Phase is the current lifecycle phase of the namespace. 
- // More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases + // More info: http://releases.k8s.io/release-1.4/docs/design/namespaces.md#phases Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` } @@ -2726,15 +2731,15 @@ const ( type Namespace struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of the Namespace. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status describes the current status of a Namespace. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -2742,11 +2747,11 @@ type Namespace struct { type NamespaceList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Namespace objects in the list. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -2755,7 +2760,7 @@ type NamespaceList struct { type Binding struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The target object that you want to bind to the standard object. @@ -2945,13 +2950,13 @@ type OwnerReference struct { // API version of the referent. APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names Name string `json:"name" protobuf:"bytes,3,opt,name=name"` // UID of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` // If true, this reference points to the managing controller. 
Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"` @@ -2960,21 +2965,21 @@ type OwnerReference struct { // ObjectReference contains enough information to let you inspect or modify the referred object. type ObjectReference struct { // Kind of the referent. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // Namespace of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` // Name of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` // UID of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` // API version of the referent. APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` // Specific resourceVersion to which this reference is made, if any. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` // If referring to a piece of an object instead of an entire object, this string @@ -2992,7 +2997,7 @@ type ObjectReference struct { // referenced object inside the same namespace. type LocalObjectReference struct { // Name of the referent. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names // TODO: Add other useful fields. apiVersion, kind, uid? Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` } @@ -3027,7 +3032,7 @@ const ( type Event struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // The object that this event is about. @@ -3062,7 +3067,7 @@ type Event struct { type EventList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of events @@ -3073,7 +3078,7 @@ type EventList struct { type List struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of objects @@ -3118,11 +3123,11 @@ type LimitRangeSpec struct { type LimitRange struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the limits enforced. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -3130,11 +3135,11 @@ type LimitRange struct { type LimitRangeList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of LimitRange objects. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md + // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_limit_range.md Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -3187,7 +3192,7 @@ const ( // ResourceQuotaSpec defines the desired hard limits to enforce for Quota. type ResourceQuotaSpec struct { // Hard is the set of desired hard limits for each named resource. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` // A collection of filters that must match each object tracked by a quota. // If not specified, the quota matches all objects. @@ -3197,7 +3202,7 @@ type ResourceQuotaSpec struct { // ResourceQuotaStatus defines the enforced hard limits and observed use. type ResourceQuotaStatus struct { // Hard is the set of enforced hard limits for each named resource. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` // Used is the current observed total usage of the resource in the namespace. Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"` @@ -3209,15 +3214,15 @@ type ResourceQuotaStatus struct { type ResourceQuota struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired quota. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status defines the actual enforced quota and its current usage. - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3225,11 +3230,11 @@ type ResourceQuota struct { type ResourceQuotaList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of ResourceQuota objects. - // More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota + // More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -3240,7 +3245,7 @@ type ResourceQuotaList struct { type Secret struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN @@ -3319,11 +3324,11 @@ const ( type SecretList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of secret objects. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -3333,7 +3338,7 @@ type SecretList struct { type ConfigMap struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Data contains the configuration data. 
@@ -3345,7 +3350,7 @@ type ConfigMap struct { type ConfigMapList struct { unversioned.TypeMeta `json:",inline"` - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of ConfigMaps. @@ -3383,7 +3388,7 @@ type ComponentCondition struct { type ComponentStatus struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of component conditions observed @@ -3394,7 +3399,7 @@ type ComponentStatus struct { type ComponentStatusList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ComponentStatus objects. @@ -3483,7 +3488,7 @@ type SELinuxOptions struct { type RangeAllocation struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Range is string that identifies the range represented by 'data'. diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/1.4/pkg/api/v1/types_swagger_doc_generated.go index a2552bdf3f52..8e8954109808 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/api/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/client-go/1.4/pkg/api/v1/types_swagger_doc_generated.go @@ -29,10 +29,10 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_AWSElasticBlockStoreVolumeSource = map[string]string{ "": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", - "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore", + "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore", "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", - "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore", + "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore", } func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string { @@ -95,7 +95,7 @@ func (AzureFileVolumeSource) SwaggerDoc() map[string]string { var map_Binding = map[string]string{ "": "Binding ties one object to another. For example, a pod is bound to a node by a scheduler.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "target": "The target object that you want to bind to the standard object.", } @@ -115,12 +115,12 @@ func (Capabilities) SwaggerDoc() map[string]string { var map_CephFSVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it", "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/release-1.4/examples/volumes/cephfs/README.md#how-to-use-it", } func (CephFSVolumeSource) SwaggerDoc() map[string]string { @@ -129,9 +129,9 @@ func (CephFSVolumeSource) SwaggerDoc() map[string]string { var map_CinderVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "volumeID": "volume id used to identify the volume in cinder More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md", } func (CinderVolumeSource) SwaggerDoc() map[string]string { @@ -152,7 +152,7 @@ func (ComponentCondition) SwaggerDoc() map[string]string { var map_ComponentStatus = map[string]string{ "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "conditions": "List of component conditions observed", } @@ -162,7 +162,7 @@ func (ComponentStatus) SwaggerDoc() map[string]string { var map_ComponentStatusList = map[string]string{ "": "Status of all the conditions for the component as a list of ComponentStatus objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", "items": "List of ComponentStatus objects.", } @@ -172,7 +172,7 @@ func (ComponentStatusList) SwaggerDoc() map[string]string { var map_ConfigMap = map[string]string{ "": "ConfigMap holds configuration data for pods to consume.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "data": "Data contains the configuration data. 
Each key must be a valid DNS_SUBDOMAIN with an optional leading dot.", } @@ -191,7 +191,7 @@ func (ConfigMapKeySelector) SwaggerDoc() map[string]string { var map_ConfigMapList = map[string]string{ "": "ConfigMapList is a resource containing a list of ConfigMap objects.", - "metadata": "More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "items": "Items is the list of ConfigMaps.", } @@ -212,20 +212,20 @@ func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { var map_Container = map[string]string{ "": "A single application container that you want to run within a pod.", "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - "image": "Docker image name. More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md", - "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands", - "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md#containers-and-commands", + "image": "Docker image name. More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md#containers-and-commands", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md#containers-and-commands", "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", "ports": "List of ports to expose from the container. 
Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", "env": "List of environment variables to set in the container. Cannot be updated.", - "resources": "Compute Resources required by this container. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources", + "resources": "Compute Resources required by this container. Cannot be updated. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#resources", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", - "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes", - "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes", + "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes", + "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Defaults to /dev/termination-log. Cannot be updated.", - "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#updating-images", - "securityContext": "Security options the pod should run with. More info: http://releases.k8s.io/HEAD/docs/design/security_context.md", + "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md#updating-images", + "securityContext": "Security options the pod should run with. More info: http://releases.k8s.io/release-1.4/docs/design/security_context.md", "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", @@ -310,9 +310,9 @@ var map_ContainerStatus = map[string]string{ "lastState": "Details about the container's last termination condition.", "ready": "Specifies whether the container has passed its readiness probe.", "restartCount": "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.", - "image": "The image the container is running. More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md", + "image": "The image the container is running. More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md", "imageID": "ImageID of the container's image.", - "containerID": "Container's ID in the format 'docker://'. More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#container-information", + "containerID": "Container's ID in the format 'docker://'. More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#container-information", } func (ContainerStatus) SwaggerDoc() map[string]string { @@ -363,7 +363,7 @@ func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string { var map_EmptyDirVolumeSource = map[string]string{ "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", - "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir", + "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#emptydir", } func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { @@ -406,7 +406,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string { var map_Endpoints = map[string]string{ "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. 
A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", } @@ -416,7 +416,7 @@ func (Endpoints) SwaggerDoc() map[string]string { var map_EndpointsList = map[string]string{ "": "EndpointsList is a list of endpoints.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", "items": "List of endpoints.", } @@ -449,7 +449,7 @@ func (EnvVarSource) SwaggerDoc() map[string]string { var map_Event = map[string]string{ "": "Event is a report of an event somewhere in the cluster.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "involvedObject": "The object that this event is about.", "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", "message": "A human-readable description of the status of this operation.", @@ -466,7 +466,7 @@ func (Event) SwaggerDoc() map[string]string { var map_EventList = map[string]string{ "": "EventList is a list of events.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", "items": "List of events", } @@ -539,10 +539,10 @@ func (FlockerVolumeSource) SwaggerDoc() map[string]string { var map_GCEPersistentDiskVolumeSource = map[string]string{ "": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", - "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk", - "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk", + "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk", + "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk", } func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { @@ -562,9 +562,9 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string { var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md#create-a-pod", } func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { @@ -607,7 +607,7 @@ func (Handler) SwaggerDoc() map[string]string { var map_HostPathVolumeSource = map[string]string{ "": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", - "path": "Path of the directory on the host. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath", + "path": "Path of the directory on the host. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath", } func (HostPathVolumeSource) SwaggerDoc() map[string]string { @@ -620,7 +620,7 @@ var map_ISCSIVolumeSource = map[string]string{ "iqn": "Target iSCSI Qualified Name.", "lun": "iSCSI target lun number.", "iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#iscsi", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#iscsi", "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", } @@ -641,8 +641,8 @@ func (KeyToPath) SwaggerDoc() map[string]string { var map_Lifecycle = map[string]string{ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", - "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details", - "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: http://releases.k8s.io/HEAD/docs/user-guide/container-environment.md#hook-details", + "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#hook-details", + "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: http://releases.k8s.io/release-1.4/docs/user-guide/container-environment.md#hook-details", } func (Lifecycle) SwaggerDoc() map[string]string { @@ -651,8 +651,8 @@ func (Lifecycle) SwaggerDoc() map[string]string { var map_LimitRange = map[string]string{ "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the limits enforced. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the limits enforced. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (LimitRange) SwaggerDoc() map[string]string { @@ -675,8 +675,8 @@ func (LimitRangeItem) SwaggerDoc() map[string]string { var map_LimitRangeList = map[string]string{ "": "LimitRangeList is a list of LimitRange items.", - "metadata": "Standard list metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "Items is a list of LimitRange objects. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_limit_range.md", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "items": "Items is a list of LimitRange objects. More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_limit_range.md", } func (LimitRangeList) SwaggerDoc() map[string]string { @@ -694,7 +694,7 @@ func (LimitRangeSpec) SwaggerDoc() map[string]string { var map_List = map[string]string{ "": "List holds a list of objects, which may not be known by the server.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", "items": "List of objects", } @@ -736,7 +736,7 @@ func (LoadBalancerStatus) SwaggerDoc() map[string]string { var map_LocalObjectReference = map[string]string{ "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "name": "Name of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", + "name": "Name of the referent. More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names", } func (LocalObjectReference) SwaggerDoc() map[string]string { @@ -745,9 +745,9 @@ func (LocalObjectReference) SwaggerDoc() map[string]string { var map_NFSVolumeSource = map[string]string{ "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", - "server": "Server is the hostname or IP address of the NFS server. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs", - "path": "Path that is exported by the NFS server. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs", - "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs", + "server": "Server is the hostname or IP address of the NFS server. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs", + "path": "Path that is exported by the NFS server. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs", + "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs", } func (NFSVolumeSource) SwaggerDoc() map[string]string { @@ -756,9 +756,9 @@ func (NFSVolumeSource) SwaggerDoc() map[string]string { var map_Namespace = map[string]string{ "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Namespace. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status describes the current status of a Namespace. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. 
More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of the Namespace. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", + "status": "Status describes the current status of a Namespace. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (Namespace) SwaggerDoc() map[string]string { @@ -767,8 +767,8 @@ func (Namespace) SwaggerDoc() map[string]string { var map_NamespaceList = map[string]string{ "": "NamespaceList is a list of Namespaces.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "Items is the list of Namespace objects in the list. More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "items": "Items is the list of Namespace objects in the list. More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md", } func (NamespaceList) SwaggerDoc() map[string]string { @@ -777,7 +777,7 @@ func (NamespaceList) SwaggerDoc() map[string]string { var map_NamespaceSpec = map[string]string{ "": "NamespaceSpec describes the attributes on a Namespace.", - "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#finalizers", + "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: http://releases.k8s.io/release-1.4/docs/design/namespaces.md#finalizers", } func (NamespaceSpec) SwaggerDoc() map[string]string { @@ -786,7 +786,7 @@ func (NamespaceSpec) SwaggerDoc() map[string]string { var map_NamespaceStatus = map[string]string{ "": "NamespaceStatus is information about the current status of a Namespace.", - "phase": "Phase is the current lifecycle phase of the namespace. More info: http://releases.k8s.io/HEAD/docs/design/namespaces.md#phases", + "phase": "Phase is the current lifecycle phase of the namespace. More info: http://releases.k8s.io/release-1.4/docs/design/namespaces.md#phases", } func (NamespaceStatus) SwaggerDoc() map[string]string { @@ -795,9 +795,9 @@ func (NamespaceStatus) SwaggerDoc() map[string]string { var map_Node = map[string]string{ "": "Node is a worker node in Kubernetes, formerly known as minion. Each node will have a unique identifier in the cache (i.e. in etcd).", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a node. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a node. http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the node. Populated by the system. Read-only. 
More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (Node) SwaggerDoc() map[string]string { @@ -849,7 +849,7 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { var map_NodeList = map[string]string{ "": "NodeList is the whole list of all Nodes which have been registered with master.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", "items": "List of nodes", } @@ -900,7 +900,7 @@ var map_NodeSpec = map[string]string{ "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", "externalID": "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated.", "providerID": "ID of the node assigned by the cloud provider in the format: ://", - "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration\"`", + "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#manual-node-administration\"`", } func (NodeSpec) SwaggerDoc() map[string]string { @@ -909,13 +909,13 @@ func (NodeSpec) SwaggerDoc() map[string]string { var map_NodeStatus = map[string]string{ "": "NodeStatus is information about the current status of a node.", - "capacity": "Capacity represents the total resources of a node. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity for more details.", + "capacity": "Capacity represents the total resources of a node. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#capacity for more details.", "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", - "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-phase The field is never populated, and now is deprecated.", - "conditions": "Conditions is an array of current observed node conditions. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-addresses", + "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-phase The field is never populated, and now is deprecated.", + "conditions": "Conditions is an array of current observed node conditions. More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-condition", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-addresses", "daemonEndpoints": "Endpoints of daemons running on the Node.", - "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info", + "nodeInfo": "Set of ids/uuids to uniquely identify the node. 
More info: http://releases.k8s.io/release-1.4/docs/admin/node.md#node-info", "images": "List of container images on this node", "volumesInUse": "List of attachable volumes in use (mounted) by the node.", "volumesAttached": "List of volumes that are attached to the node.", @@ -955,18 +955,18 @@ func (ObjectFieldSelector) SwaggerDoc() map[string]string { var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", - "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency", - "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. 
More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md", "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.", - "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids", - "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource will be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. 
For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet will send a hard termination signal to the container. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource will be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. Once the resource is deleted in the API, the Kubelet will send a hard termination signal to the container. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", - "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md", - "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://releases.k8s.io/HEAD/docs/user-guide/annotations.md", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://releases.k8s.io/release-1.4/docs/user-guide/annotations.md", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller.", "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", @@ -978,12 +978,12 @@ func (ObjectMeta) SwaggerDoc() map[string]string { var map_ObjectReference = map[string]string{ "": "ObjectReference contains enough information to let you inspect or modify the referred object.", - "kind": "Kind of the referent. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "namespace": "Namespace of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/namespaces.md", - "name": "Name of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", - "uid": "UID of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids", + "kind": "Kind of the referent. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "namespace": "Namespace of the referent. More info: http://releases.k8s.io/release-1.4/docs/user-guide/namespaces.md", + "name": "Name of the referent. More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names", + "uid": "UID of the referent. More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids", "apiVersion": "API version of the referent.", - "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#concurrency-control-and-consistency", "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", } @@ -994,9 +994,9 @@ func (ObjectReference) SwaggerDoc() map[string]string { var map_OwnerReference = map[string]string{ "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.", "apiVersion": "API version of the referent.", - "kind": "Kind of the referent. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "name": "Name of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", - "uid": "UID of the referent. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#uids", + "kind": "Kind of the referent. 
More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "name": "Name of the referent. More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names", + "uid": "UID of the referent. More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#uids", "controller": "If true, this reference points to the managing controller.", } @@ -1005,10 +1005,10 @@ func (OwnerReference) SwaggerDoc() map[string]string { } var map_PersistentVolume = map[string]string{ - "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes", - "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistent-volumes", + "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistent-volumes", + "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistent-volumes", } func (PersistentVolume) SwaggerDoc() map[string]string { @@ -1017,9 +1017,9 @@ func (PersistentVolume) SwaggerDoc() map[string]string { var map_PersistentVolumeClaim = map[string]string{ "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", - "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", + "status": "Status represents the current information/status of a persistent volume claim. Read-only. 
More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", } func (PersistentVolumeClaim) SwaggerDoc() map[string]string { @@ -1028,8 +1028,8 @@ func (PersistentVolumeClaim) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimList = map[string]string{ "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "A list of persistent volume claims. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "items": "A list of persistent volume claims. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", } func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { @@ -1038,9 +1038,9 @@ func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimSpec = map[string]string{ "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", - "accessModes": "AccessModes contains the desired access modes the volume should have. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1", + "accessModes": "AccessModes contains the desired access modes the volume should have. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes-1", "selector": "A label query over volumes to consider for binding.", - "resources": "Resources represents the minimum resources the volume should have. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources", + "resources": "Resources represents the minimum resources the volume should have. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", } @@ -1051,7 +1051,7 @@ func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimStatus = map[string]string{ "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", "phase": "Phase represents the current phase of PersistentVolumeClaim.", - "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes-1", + "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#access-modes-1", "capacity": "Represents the actual resources of the underlying volume.", } @@ -1061,7 +1061,7 @@ func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimVolumeSource = map[string]string{ "": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", - "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", + "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", "readOnly": "Will force the ReadOnly setting in VolumeMounts. Default false.", } @@ -1071,8 +1071,8 @@ func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { var map_PersistentVolumeList = map[string]string{ "": "PersistentVolumeList is a list of PersistentVolume items.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "List of persistent volumes. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "items": "List of persistent volumes. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md", } func (PersistentVolumeList) SwaggerDoc() map[string]string { @@ -1081,14 +1081,14 @@ func (PersistentVolumeList) SwaggerDoc() map[string]string { var map_PersistentVolumeSource = map[string]string{ "": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.", - "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk", - "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore", - "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath", - "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", - "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk", + "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore", + "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! 
On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath", + "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md", + "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md", "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelet's host machine. More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", @@ -1105,10 +1105,10 @@ func (PersistentVolumeSource) SwaggerDoc() map[string]string { var map_PersistentVolumeSpec = map[string]string{ "": "PersistentVolumeSpec is the specification of a persistent volume.", - "capacity": "A description of the persistent volume's resources and capacity. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#capacity", - "accessModes": "AccessModes contains all ways the volume can be mounted. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#access-modes", - "claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#binding", - "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. 
Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#recycling-policy", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1117,7 +1117,7 @@ func (PersistentVolumeSpec) SwaggerDoc() map[string]string { var map_PersistentVolumeStatus = map[string]string{ "": "PersistentVolumeStatus is the current status of a persistent volume.", - "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#phase", + "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#phase", "message": "A human-readable message indicating details about why the volume is in this state.", "reason": "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", } @@ -1128,9 +1128,9 @@ func (PersistentVolumeStatus) SwaggerDoc() map[string]string { var map_Pod = map[string]string{ "": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (Pod) SwaggerDoc() map[string]string { @@ -1183,8 +1183,8 @@ func (PodAttachOptions) SwaggerDoc() map[string]string { var map_PodCondition = map[string]string{ "": "PodCondition contains details for the current condition of this pod.", - "type": "Type is the type of the condition. Currently only Ready. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions", - "status": "Status is the status of the condition. Can be True, False, Unknown. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions", + "type": "Type is the type of the condition. Currently only Ready. More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions", + "status": "Status is the status of the condition. Can be True, False, Unknown. 
More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions", "lastProbeTime": "Last time we probed the condition.", "lastTransitionTime": "Last time the condition transitioned from one status to another.", "reason": "Unique, one-word, CamelCase reason for the condition's last transition.", @@ -1211,8 +1211,8 @@ func (PodExecOptions) SwaggerDoc() map[string]string { var map_PodList = map[string]string{ "": "PodList is a list of Pods.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "List of pods. More info: http://releases.k8s.io/HEAD/docs/user-guide/pods.md", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "items": "List of pods. More info: http://releases.k8s.io/release-1.4/docs/user-guide/pods.md", } func (PodList) SwaggerDoc() map[string]string { @@ -1268,21 +1268,21 @@ func (PodSignature) SwaggerDoc() map[string]string { var map_PodSpec = map[string]string{ "": "PodSpec is a description of a pod.", - "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md", - "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md", - "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#restartpolicy", + "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md", + "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. More info: http://releases.k8s.io/release-1.4/docs/user-guide/containers.md", + "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#restartpolicy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", "dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\".", - "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/node-selection/README.md", - "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md", + "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: http://releases.k8s.io/release-1.4/docs/user-guide/node-selection/README.md", + "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/release-1.4/docs/design/service_accounts.md", "serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", "hostPID": "Use the host's pid namespace. Optional: Default to false.", "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", "securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", - "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: http://releases.k8s.io/HEAD/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod", + "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: http://releases.k8s.io/release-1.4/docs/user-guide/images.md#specifying-imagepullsecrets-on-a-pod", "hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", "subdomain": "If specified, the fully qualified Pod hostname will be \"...svc.\". If not specified, the pod will not have a domainname at all.", } @@ -1293,14 +1293,14 @@ func (PodSpec) SwaggerDoc() map[string]string { var map_PodStatus = map[string]string{ "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.", - "phase": "Current condition of the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-phase", - "conditions": "Current service state of pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#pod-conditions", + "phase": "Current condition of the pod. More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-phase", + "conditions": "Current service state of pod. 
More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#pod-conditions", "message": "A human readable message indicating details about why the pod is in this condition.", "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'", "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses", + "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-statuses", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1309,8 +1309,8 @@ func (PodStatus) SwaggerDoc() map[string]string { var map_PodStatusResult = map[string]string{ "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (PodStatusResult) SwaggerDoc() map[string]string { @@ -1319,8 +1319,8 @@ func (PodStatusResult) SwaggerDoc() map[string]string { var map_PodTemplate = map[string]string{ "": "PodTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "template": "Template defines the pods that will be created from this pod template. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "template": "Template defines the pods that will be created from this pod template. http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (PodTemplate) SwaggerDoc() map[string]string { @@ -1329,7 +1329,7 @@ func (PodTemplate) SwaggerDoc() map[string]string { var map_PodTemplateList = map[string]string{ "": "PodTemplateList is a list of PodTemplates.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. 
More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", "items": "List of pod templates", } @@ -1339,8 +1339,8 @@ func (PodTemplateList) SwaggerDoc() map[string]string { var map_PodTemplateSpec = map[string]string{ "": "PodTemplateSpec describes the data a pod should have when created from a template", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (PodTemplateSpec) SwaggerDoc() map[string]string { @@ -1380,8 +1380,8 @@ func (PreferredSchedulingTerm) SwaggerDoc() map[string]string { var map_Probe = map[string]string{ "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", - "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes", - "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes", + "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes", + "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: http://releases.k8s.io/release-1.4/docs/user-guide/pod-states.md#container-probes", "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", @@ -1406,14 +1406,14 @@ func (QuobyteVolumeSource) SwaggerDoc() map[string]string { var map_RBDVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#rbd", - "pool": "The rados pool name. Default is rbd. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it.", - "user": "The rados user name. Default is admin. 
More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "monitors": "A collection of Ceph monitors. More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#rbd", + "pool": "The rados pool name. Default is rbd. More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it.", + "user": "The rados user name. Default is admin. More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md#how-to-use-it", } func (RBDVolumeSource) SwaggerDoc() map[string]string { @@ -1422,7 +1422,7 @@ func (RBDVolumeSource) SwaggerDoc() map[string]string { var map_RangeAllocation = map[string]string{ "": "RangeAllocation is not a public type.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "range": "Range is string that identifies the range represented by 'data'.", "data": "Data is a bit array containing all allocated addresses in the previous segment.", } @@ -1433,9 +1433,9 @@ func (RangeAllocation) SwaggerDoc() map[string]string { var map_ReplicationController = map[string]string{ "": "ReplicationController represents the configuration of a replication controller.", - "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the replication controller. 
This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (ReplicationController) SwaggerDoc() map[string]string { @@ -1444,8 +1444,8 @@ func (ReplicationController) SwaggerDoc() map[string]string { var map_ReplicationControllerList = map[string]string{ "": "ReplicationControllerList is a collection of replication controllers.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "List of replication controllers. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "items": "List of replication controllers. More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md", } func (ReplicationControllerList) SwaggerDoc() map[string]string { @@ -1454,9 +1454,9 @@ func (ReplicationControllerList) SwaggerDoc() map[string]string { var map_ReplicationControllerSpec = map[string]string{ "": "ReplicationControllerSpec is the specification of a replication controller.", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller", - "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller", + "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. 
More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template", } func (ReplicationControllerSpec) SwaggerDoc() map[string]string { @@ -1465,7 +1465,7 @@ func (ReplicationControllerSpec) SwaggerDoc() map[string]string { var map_ReplicationControllerStatus = map[string]string{ "": "ReplicationControllerStatus represents the current status of a replication controller.", - "replicas": "Replicas is the most recently oberved number of replicas. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller", + "replicas": "Replicas is the most recently observed number of replicas. More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.", "readyReplicas": "The number of ready replicas for this replication controller.", "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed replication controller.", @@ -1488,9 +1488,9 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string { var map_ResourceQuota = map[string]string{ "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired quota. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status defines the actual enforced quota and its current usage. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired quota. http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", + "status": "Status defines the actual enforced quota and its current usage. http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (ResourceQuota) SwaggerDoc() map[string]string { @@ -1499,8 +1499,8 @@ func (ResourceQuota) SwaggerDoc() map[string]string { var map_ResourceQuotaList = map[string]string{ "": "ResourceQuotaList is a list of ResourceQuota items.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "Items is a list of ResourceQuota objects. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "items": "Items is a list of ResourceQuota objects. 
More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", } func (ResourceQuotaList) SwaggerDoc() map[string]string { @@ -1509,7 +1509,7 @@ func (ResourceQuotaList) SwaggerDoc() map[string]string { var map_ResourceQuotaSpec = map[string]string{ "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.", - "hard": "Hard is the set of desired hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", + "hard": "Hard is the set of desired hard limits for each named resource. More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", "scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.", } @@ -1519,7 +1519,7 @@ func (ResourceQuotaSpec) SwaggerDoc() map[string]string { var map_ResourceQuotaStatus = map[string]string{ "": "ResourceQuotaStatus defines the enforced hard limits and observed use.", - "hard": "Hard is the set of enforced hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", + "hard": "Hard is the set of enforced hard limits for each named resource. More info: http://releases.k8s.io/release-1.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota", "used": "Used is the current observed total usage of the resource in the namespace.", } @@ -1551,7 +1551,7 @@ func (SELinuxOptions) SwaggerDoc() map[string]string { var map_Secret = map[string]string{ "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "data": "Data contains the secret data. Each key must be a valid DNS_SUBDOMAIN or leading dot followed by valid DNS_SUBDOMAIN. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", "stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.", "type": "Used to facilitate programmatic handling of secret data.", @@ -1572,8 +1572,8 @@ func (SecretKeySelector) SwaggerDoc() map[string]string { var map_SecretList = map[string]string{ "": "SecretList is a list of Secret.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "Items is a list of secret objects. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "items": "Items is a list of secret objects. 
More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md", } func (SecretList) SwaggerDoc() map[string]string { @@ -1582,7 +1582,7 @@ func (SecretList) SwaggerDoc() map[string]string { var map_SecretVolumeSource = map[string]string{ "": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", - "secretName": "Name of the secret in the pod's namespace to use. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets", + "secretName": "Name of the secret in the pod's namespace to use. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#secrets", "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", } @@ -1616,9 +1616,9 @@ func (SerializedReference) SwaggerDoc() map[string]string { var map_Service = map[string]string{ "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a service. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a service. http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (Service) SwaggerDoc() map[string]string { @@ -1627,9 +1627,9 @@ func (Service) SwaggerDoc() map[string]string { var map_ServiceAccount = map[string]string{ "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md", - "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://releases.k8s.io/HEAD/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md", + "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: http://releases.k8s.io/release-1.4/docs/user-guide/secrets.md#manually-specifying-an-imagepullsecret", } func (ServiceAccount) SwaggerDoc() map[string]string { @@ -1638,8 +1638,8 @@ func (ServiceAccount) SwaggerDoc() map[string]string { var map_ServiceAccountList = map[string]string{ "": "ServiceAccountList is a list of ServiceAccount objects", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "List of ServiceAccounts. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md#service-accounts", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "items": "List of ServiceAccounts. More info: http://releases.k8s.io/release-1.4/docs/design/service_accounts.md#service-accounts", } func (ServiceAccountList) SwaggerDoc() map[string]string { @@ -1648,7 +1648,7 @@ func (ServiceAccountList) SwaggerDoc() map[string]string { var map_ServiceList = map[string]string{ "": "ServiceList holds a list of services.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", "items": "List of services", } @@ -1661,8 +1661,8 @@ var map_ServicePort = map[string]string{ "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\" and \"UDP\". Default is TCP.", "port": "The port that will be exposed by this service.", - "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#defining-a-service", - "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#type--nodeport", + "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#defining-a-service", + "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#type--nodeport", } func (ServicePort) SwaggerDoc() map[string]string { @@ -1680,15 +1680,15 @@ func (ServiceProxyOptions) SwaggerDoc() map[string]string { var map_ServiceSpec = map[string]string{ "": "ServiceSpec describes the attributes that a user creates on a service.", - "ports": "The list of ports that are exposed by this service. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies", - "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#overview", - "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies", - "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. 
\"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#overview", + "ports": "The list of ports that are exposed by this service. More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies", + "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#overview", + "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies", + "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#overview", "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system. A previous form of this functionality exists as the deprecatedPublicIPs field. When using this field, callers should also clear the deprecatedPublicIPs field.", "deprecatedPublicIPs": "deprecatedPublicIPs is deprecated and replaced by the externalIPs field with almost the exact same semantics. This field is retained in the v1 API for compatibility until at least 8/20/2016. It will be removed from any new API revisions. If both deprecatedPublicIPs *and* externalIPs are set, deprecatedPublicIPs is used.", - "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/services.md#virtual-ips-and-service-proxies", + "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: http://releases.k8s.io/release-1.4/docs/user-guide/services.md#virtual-ips-and-service-proxies", "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", - "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: http://releases.k8s.io/HEAD/docs/user-guide/services-firewalls.md", + "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: http://releases.k8s.io/release-1.4/docs/user-guide/services-firewalls.md", "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid DNS name and requires Type to be ExternalName.", } @@ -1739,7 +1739,7 @@ func (Toleration) SwaggerDoc() map[string]string { var map_Volume = map[string]string{ "": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", - "name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", + "name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names", } func (Volume) SwaggerDoc() map[string]string { @@ -1760,19 +1760,19 @@ func (VolumeMount) SwaggerDoc() map[string]string { var map_VolumeSource = map[string]string{ "": "Represents the source of a volume to mount. Only one of its members may be specified.", - "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#hostpath", - "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#emptydir", - "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#gcepersistentdisk", - "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#awselasticblockstore", + "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#hostpath", + "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#emptydir", + "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#gcepersistentdisk", + "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#awselasticblockstore", "gitRepo": "GitRepo represents a git repository at a particular revision.", - "secret": "Secret represents a secret that should populate this volume. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets", - "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#nfs", - "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", - "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", - "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "secret": "Secret represents a secret that should populate this volume. More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#secrets", + "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: http://releases.k8s.io/release-1.4/docs/user-guide/volumes.md#nfs", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/release-1.4/examples/volumes/iscsi/README.md", + "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/release-1.4/examples/volumes/glusterfs/README.md", + "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://releases.k8s.io/release-1.4/docs/user-guide/persistent-volumes.md#persistentvolumeclaims", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/release-1.4/examples/volumes/rbd/README.md", "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. 
This is an alpha feature and may change in future.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/release-1.4/examples/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", diff --git a/vendor/k8s.io/client-go/1.4/pkg/api/validation/validation.go b/vendor/k8s.io/client-go/1.4/pkg/api/validation/validation.go index 483a88d71f01..27858d6d4101 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/api/validation/validation.go +++ b/vendor/k8s.io/client-go/1.4/pkg/api/validation/validation.go @@ -438,6 +438,11 @@ func ValidateObjectMetaUpdate(newMeta, oldMeta *api.ObjectMeta, fldPath *field.P allErrs = append(allErrs, field.Invalid(fldPath.Child("deletionTimestamp"), newMeta.DeletionTimestamp, "field is immutable; may only be changed via deletion")) } + // Finalizers cannot be added if the object is already being deleted. + if oldMeta.DeletionTimestamp != nil { + allErrs = append(allErrs, ValidateNoNewFinalizers(newMeta.Finalizers, oldMeta.Finalizers, fldPath.Child("finalizers"))...) + } + // Reject updates that don't specify a resource version if len(newMeta.ResourceVersion) == 0 { allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceVersion"), newMeta.ResourceVersion, "must be specified for an update")) @@ -461,6 +466,16 @@ func ValidateObjectMetaUpdate(newMeta, oldMeta *api.ObjectMeta, fldPath *field.P return allErrs } +func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList { + const newFinalizersErrorMsg string = `no new finalizers can be added if the object is being deleted` + allErrs := field.ErrorList{} + extra := sets.NewString(newFinalizers...).Difference(sets.NewString(oldFinalizers...)) + if len(extra) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("no new finalizers can be added if the object is being deleted, found new finalizers %#v", extra.List()))) + } + return allErrs +} + func validateVolumes(volumes []api.Volume, fldPath *field.Path) (sets.String, field.ErrorList) { allErrs := field.ErrorList{} @@ -2553,6 +2568,9 @@ func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList { allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...) } + // TODO(freehan): allow user to update loadbalancerSourceRanges + allErrs = append(allErrs, ValidateImmutableField(service.Spec.LoadBalancerSourceRanges, oldService.Spec.LoadBalancerSourceRanges, field.NewPath("spec", "loadBalancerSourceRanges"))...) + allErrs = append(allErrs, ValidateService(service)...) return allErrs } diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/apps/types.go b/vendor/k8s.io/client-go/1.4/pkg/apis/apps/types.go index e67a8b88a2ec..23e380f6b568 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/apps/types.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/apps/types.go @@ -51,7 +51,7 @@ type PetSetSpec struct { // Selector is a label query over pods that should match the replica count. 
// If empty, defaulted to labels on the pod template. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector *unversioned.LabelSelector `json:"selector,omitempty"` // Template is the object that describes the pod that will be created if diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1/generated.proto index 2338042bfd0d..b2d0c9642198 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1/generated.proto +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1/generated.proto @@ -66,7 +66,7 @@ message PetSetSpec { // Selector is a label query over pods that should match the replica count. // If empty, defaulted to labels on the pod template. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors optional k8s.io.kubernetes.pkg.api.unversioned.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1/types.go b/vendor/k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1/types.go index 800b12aa9fc7..e76c30d6d061 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1/types.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1/types.go @@ -51,7 +51,7 @@ type PetSetSpec struct { // Selector is a label query over pods that should match the replica count. // If empty, defaulted to labels on the pod template. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector *unversioned.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template is the object that describes the pod that will be created if diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go index 5191f1224cd7..66adf1607ced 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/apps/v1alpha1/types_swagger_doc_generated.go @@ -48,7 +48,7 @@ func (PetSetList) SwaggerDoc() map[string]string { var map_PetSetSpec = map[string]string{ "": "A PetSetSpec is the specification of a PetSet.", "replicas": "Replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "Selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", + "selector": "Selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors", "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. 
Each pod stamped out by the PetSet will fulfill this Template, but have a unique identity from the rest of the PetSet.", "volumeClaimTemplates": "VolumeClaimTemplates is a list of claims that pets are allowed to reference. The PetSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pet. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "ServiceName is the name of the service that governs this PetSet. This service must exist before the PetSet, and is responsible for the network identity of the set. Pets get DNS/hostnames that follow the pattern: pet-specific-string.serviceName.default.svc.cluster.local where \"pet-specific-string\" is managed by the PetSet controller.", diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/types.go b/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/types.go index 26f6f7185e59..04a2874ad519 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/types.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/types.go @@ -24,13 +24,13 @@ import ( // Scale represents a scaling request for a resource. type Scale struct { unversioned.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata. api.ObjectMeta `json:"metadata,omitempty"` - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Spec ScaleSpec `json:"spec,omitempty"` - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Read-only. Status ScaleStatus `json:"status,omitempty"` } @@ -48,15 +48,15 @@ type ScaleStatus struct { // label query over pods that should match the replicas count. This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector string `json:"selector,omitempty"` } // CrossVersionObjectReference contains enough information to let you identify the referred resource. 
type CrossVersionObjectReference struct { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // Name of the referent; More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` // API version of the referent APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` @@ -103,7 +103,7 @@ type HorizontalPodAutoscaler struct { unversioned.TypeMeta `json:",inline"` api.ObjectMeta `json:"metadata,omitempty"` - // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty"` // current information about the autoscaler. diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/v1/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/v1/generated.proto index 891aff3b52b2..ac863415d0b7 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/v1/generated.proto @@ -32,10 +32,10 @@ option go_package = "v1"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds" optional string kind = 1; - // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // Name of the referent; More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names optional string name = 2; // API version of the referent @@ -44,10 +44,10 @@ message CrossVersionObjectReference { // configuration of a horizontal pod autoscaler. message HorizontalPodAutoscaler { - // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. optional HorizontalPodAutoscalerSpec spec = 2; // current information about the autoscaler. @@ -102,13 +102,13 @@ message HorizontalPodAutoscalerStatus { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata. optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. 
+ // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. optional ScaleSpec spec = 2; - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Read-only. optional ScaleStatus status = 3; } @@ -126,7 +126,7 @@ message ScaleStatus { // label query over pods that should match the replicas count. This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info about label selectors: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors optional string selector = 2; } diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/v1/types.go b/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/v1/types.go index e7b304a1e37f..9b0716134cf4 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/v1/types.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/v1/types.go @@ -23,9 +23,9 @@ import ( // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" + // Kind of the referent; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // Name of the referent; More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` // API version of the referent APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` @@ -70,10 +70,10 @@ type HorizontalPodAutoscalerStatus struct { // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { unversioned.TypeMeta `json:",inline"` - // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // current information about the autoscaler. @@ -93,13 +93,13 @@ type HorizontalPodAutoscalerList struct { // Scale represents a scaling request for a resource. type Scale struct { unversioned.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata. v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Read-only. Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -117,6 +117,6 @@ type ScaleStatus struct { // label query over pods that should match the replicas count. This is same // as the label selector but in the string format to avoid introspection // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info about label selectors: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` } diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go index 6b9bcf47e80d..cf7c7c839fb7 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/autoscaling/v1/types_swagger_doc_generated.go @@ -29,8 +29,8 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", + "kind": "Kind of the referent; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds\"", + "name": "Name of the referent; More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names", "apiVersion": "API version of the referent", } @@ -40,8 +40,8 @@ func (CrossVersionObjectReference) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "metadata": "Standard object metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status.", "status": "current information about the autoscaler.", } @@ -86,9 +86,9 @@ func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -107,7 +107,7 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "ScaleStatus represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", + "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors", } func (ScaleStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/types.go b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/types.go index d0d9a7490c8a..fb5178a1d67e 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/types.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/types.go @@ -27,15 +27,15 @@ import ( type Job struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata api.ObjectMeta `json:"metadata,omitempty"` // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec JobSpec `json:"spec,omitempty"` // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status JobStatus `json:"status,omitempty"` } @@ -43,7 +43,7 @@ type Job struct { type JobList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty"` // Items is the list of Job. @@ -54,22 +54,22 @@ type JobList struct { type JobTemplate struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata api.ObjectMeta `json:"metadata,omitempty"` // Template defines jobs that will be created from this template - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Template JobTemplateSpec `json:"template,omitempty"` } // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata api.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec JobSpec `json:"spec,omitempty"` } @@ -171,15 +171,15 @@ type JobCondition struct { type ScheduledJob struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata api.ObjectMeta `json:"metadata,omitempty"` // Spec is a structure defining the expected behavior of a job, including the schedule. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec ScheduledJobSpec `json:"spec,omitempty"` // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status ScheduledJobStatus `json:"status,omitempty"` } @@ -187,7 +187,7 @@ type ScheduledJob struct { type ScheduledJobList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty"` // Items is the list of ScheduledJob. diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v1/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v1/generated.proto index 264959e4f8c2..13b6bb5712c1 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v1/generated.proto +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v1/generated.proto @@ -33,15 +33,15 @@ option go_package = "v1"; // Job represents the configuration of a single job. message Job { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; // Spec is a structure defining the expected behavior of a job. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional JobSpec spec = 2; // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional JobStatus status = 3; } @@ -69,7 +69,7 @@ message JobCondition { // JobList is a collection of jobs. message JobList { // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is the list of Job. @@ -82,7 +82,7 @@ message JobSpec { // run at any given time. The actual number of pods running in steady state will // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md optional int32 parallelism = 1; // Completions specifies the desired number of successfully finished pods the @@ -90,7 +90,7 @@ message JobSpec { // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md optional int32 completions = 2; // Optional duration in seconds relative to the startTime that the job may be active @@ -99,7 +99,7 @@ message JobSpec { // Selector is a label query over pods that should match the pod count. // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors optional LabelSelector selector = 4; // ManualSelector controls generation of pod labels and pod selectors. @@ -111,19 +111,19 @@ message JobSpec { // and other jobs to not function correctly. However, You may see // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // API. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + // More info: http://releases.k8s.io/release-1.4/docs/design/selector-generation.md optional bool manualSelector = 5; // Template is the object that describes the pod that will be created when // executing a job. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; } // JobStatus represents the current state of a Job. message JobStatus { // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md repeated JobCondition conditions = 1; // StartTime represents time when the job was acknowledged by the Job Manager. 
diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v1/types.go b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v1/types.go index 29f4cdfadaf8..77d8900587a2 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v1/types.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v1/types.go @@ -27,15 +27,15 @@ import ( type Job struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -43,7 +43,7 @@ type Job struct { type JobList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Job. @@ -57,7 +57,7 @@ type JobSpec struct { // run at any given time. The actual number of pods running in steady state will // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` // Completions specifies the desired number of successfully finished pods the @@ -65,7 +65,7 @@ type JobSpec struct { // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` // Optional duration in seconds relative to the startTime that the job may be active @@ -74,7 +74,7 @@ type JobSpec struct { // Selector is a label query over pods that should match the pod count. // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` // ManualSelector controls generation of pod labels and pod selectors. @@ -86,12 +86,12 @@ type JobSpec struct { // and other jobs to not function correctly. 
However, You may see // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // API. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + // More info: http://releases.k8s.io/release-1.4/docs/design/selector-generation.md ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` // Template is the object that describes the pod that will be created when // executing a job. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` } @@ -99,7 +99,7 @@ type JobSpec struct { type JobStatus struct { // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // StartTime represents time when the job was acknowledged by the Job Manager. diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v1/types_swagger_doc_generated.go index aa0dbcc2fd16..491d23ac1292 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE var map_Job = map[string]string{ "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (Job) SwaggerDoc() map[string]string { @@ -54,7 +54,7 @@ func (JobCondition) SwaggerDoc() map[string]string { var map_JobList = map[string]string{ "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "items": "Items is the list of Job.", } @@ -64,12 +64,12 @@ func (JobList) SwaggerDoc() map[string]string { var map_JobSpec = map[string]string{ "": "JobSpec describes how the job execution will look like.", - "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. 
The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", - "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", + "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md", + "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md", "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", - "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", - "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md", - "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", + "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors", + "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. 
More info: http://releases.k8s.io/release-1.4/docs/design/selector-generation.md", + "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md", } func (JobSpec) SwaggerDoc() map[string]string { @@ -78,7 +78,7 @@ func (JobSpec) SwaggerDoc() map[string]string { var map_JobStatus = map[string]string{ "": "JobStatus represents the current state of a Job.", - "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", + "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md", "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", "active": "Active is the number of actively running pods.", diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v2alpha1/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v2alpha1/generated.proto index 95e371e2f7ab..df8cb7d2593b 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v2alpha1/generated.proto @@ -33,15 +33,15 @@ option go_package = "v2alpha1"; // Job represents the configuration of a single job. message Job { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional JobSpec spec = 2; // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional JobStatus status = 3; } @@ -69,7 +69,7 @@ message JobCondition { // JobList is a collection of jobs. message JobList { // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is the list of Job. @@ -82,7 +82,7 @@ message JobSpec { // run at any given time. The actual number of pods running in steady state will // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md optional int32 parallelism = 1; // Completions specifies the desired number of successfully finished pods the @@ -90,7 +90,7 @@ message JobSpec { // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md optional int32 completions = 2; // Optional duration in seconds relative to the startTime that the job may be active @@ -99,7 +99,7 @@ message JobSpec { // Selector is a label query over pods that should match the pod count. // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors optional LabelSelector selector = 4; // ManualSelector controls generation of pod labels and pod selectors. @@ -111,19 +111,19 @@ message JobSpec { // and other jobs to not function correctly. However, You may see // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // API. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + // More info: http://releases.k8s.io/release-1.4/docs/design/selector-generation.md optional bool manualSelector = 5; // Template is the object that describes the pod that will be created when // executing a job. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; } // JobStatus represents the current state of a Job. message JobStatus { // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md repeated JobCondition conditions = 1; // StartTime represents time when the job was acknowledged by the Job Manager. @@ -149,22 +149,22 @@ message JobStatus { // JobTemplate describes a template for creating copies of a predefined pod. message JobTemplate { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; // Template defines jobs that will be created from this template - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional JobTemplateSpec template = 2; } // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional JobSpec spec = 2; } @@ -201,22 +201,22 @@ message LabelSelectorRequirement { // ScheduledJob represents the configuration of a single scheduled job. message ScheduledJob { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; // Spec is a structure defining the expected behavior of a job, including the schedule. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional ScheduledJobSpec spec = 2; // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional ScheduledJobStatus status = 3; } // ScheduledJobList is a collection of scheduled jobs. message ScheduledJobList { // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is the list of ScheduledJob. diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v2alpha1/types.go b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v2alpha1/types.go index dc8e08e26968..6d73b53c21f4 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v2alpha1/types.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v2alpha1/types.go @@ -25,15 +25,15 @@ import ( type Job struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -41,7 +41,7 @@ type Job struct { type JobList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Job. @@ -52,22 +52,22 @@ type JobList struct { type JobTemplate struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Template defines jobs that will be created from this template - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -78,7 +78,7 @@ type JobSpec struct { // run at any given time. The actual number of pods running in steady state will // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` // Completions specifies the desired number of successfully finished pods the @@ -86,7 +86,7 @@ type JobSpec struct { // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` // Optional duration in seconds relative to the startTime that the job may be active @@ -95,7 +95,7 @@ type JobSpec struct { // Selector is a label query over pods that should match the pod count. // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` // ManualSelector controls generation of pod labels and pod selectors. @@ -107,12 +107,12 @@ type JobSpec struct { // and other jobs to not function correctly. However, You may see // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` // API. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + // More info: http://releases.k8s.io/release-1.4/docs/design/selector-generation.md ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` // Template is the object that describes the pod that will be created when // executing a job. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` } @@ -120,7 +120,7 @@ type JobSpec struct { type JobStatus struct { // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // StartTime represents time when the job was acknowledged by the Job Manager. @@ -173,15 +173,15 @@ type JobCondition struct { type ScheduledJob struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is a structure defining the expected behavior of a job, including the schedule. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec ScheduledJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status ScheduledJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -189,7 +189,7 @@ type ScheduledJob struct { type ScheduledJobList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of ScheduledJob. diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go index 17d43318df1b..710e3e155f93 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/batch/v2alpha1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v2alpha1 // AUTO-GENERATED FUNCTIONS START HERE var map_Job = map[string]string{ "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job. 
More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (Job) SwaggerDoc() map[string]string { @@ -54,7 +54,7 @@ func (JobCondition) SwaggerDoc() map[string]string { var map_JobList = map[string]string{ "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "items": "Items is the list of Job.", } @@ -64,12 +64,12 @@ func (JobList) SwaggerDoc() map[string]string { var map_JobSpec = map[string]string{ "": "JobSpec describes how the job execution will look like.", - "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", - "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", + "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md", + "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md", "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", - "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", - "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. 
More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md", - "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", + "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors", + "manualSelector": "ManualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: http://releases.k8s.io/release-1.4/docs/design/selector-generation.md", + "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md", } func (JobSpec) SwaggerDoc() map[string]string { @@ -78,7 +78,7 @@ func (JobSpec) SwaggerDoc() map[string]string { var map_JobStatus = map[string]string{ "": "JobStatus represents the current state of a Job.", - "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", + "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md", "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", "active": "Active is the number of actively running pods.", @@ -92,8 +92,8 @@ func (JobStatus) SwaggerDoc() map[string]string { var map_JobTemplate = map[string]string{ "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "template": "Template defines jobs that will be created from this template http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "template": "Template defines jobs that will be created from this template http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (JobTemplate) SwaggerDoc() map[string]string { @@ -102,8 +102,8 @@ func (JobTemplate) SwaggerDoc() map[string]string { var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata of the jobs created from this template. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (JobTemplateSpec) SwaggerDoc() map[string]string { @@ -133,9 +133,9 @@ func (LabelSelectorRequirement) SwaggerDoc() map[string]string { var map_ScheduledJob = map[string]string{ "": "ScheduledJob represents the configuration of a single scheduled job.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job, including the schedule. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job, including the schedule. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (ScheduledJob) SwaggerDoc() map[string]string { @@ -144,7 +144,7 @@ func (ScheduledJob) SwaggerDoc() map[string]string { var map_ScheduledJobList = map[string]string{ "": "ScheduledJobList is a collection of scheduled jobs.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "items": "Items is the list of ScheduledJob.", } diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/types.generated.go b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/types.generated.go index 98fa5935c7ed..c52be1ee221a 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/types.generated.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/types.generated.go @@ -1218,36 +1218,36 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } else { yysep130 := !z.EncBinary() yy2arr130 := z.EncBasicHandle().StructToArray - var yyq130 [105]bool + var yyq130 [107]bool _, _, _ = yysep130, yyq130, yy2arr130 const yyr130 bool = false yyq130[0] = x.Kind != "" yyq130[1] = x.APIVersion != "" - yyq130[51] = x.CloudProvider != "" - yyq130[52] = x.CloudConfigFile != "" - yyq130[53] = x.KubeletCgroups != "" - yyq130[54] = x.CgroupsPerQOS != false - yyq130[55] = x.RuntimeCgroups != "" - yyq130[56] = x.SystemCgroups != "" - yyq130[57] = x.CgroupRoot != "" - yyq130[61] = true - yyq130[62] = x.RktPath != "" - yyq130[63] = x.RktAPIEndpoint != "" - yyq130[64] = x.RktStage1Image != "" - yyq130[85] = true - yyq130[86] = x.NodeIP != "" - yyq130[90] = x.EvictionHard != "" - yyq130[91] = x.EvictionSoft != "" - yyq130[92] = x.EvictionSoftGracePeriod != "" - 
yyq130[93] = true - yyq130[94] = x.EvictionMaxPodGracePeriod != 0 - yyq130[95] = x.EvictionMinimumReclaim != "" - yyq130[104] = len(x.AllowedUnsafeSysctls) != 0 + yyq130[53] = x.CloudProvider != "" + yyq130[54] = x.CloudConfigFile != "" + yyq130[55] = x.KubeletCgroups != "" + yyq130[56] = x.CgroupsPerQOS != false + yyq130[57] = x.RuntimeCgroups != "" + yyq130[58] = x.SystemCgroups != "" + yyq130[59] = x.CgroupRoot != "" + yyq130[63] = true + yyq130[64] = x.RktPath != "" + yyq130[65] = x.RktAPIEndpoint != "" + yyq130[66] = x.RktStage1Image != "" + yyq130[87] = true + yyq130[88] = x.NodeIP != "" + yyq130[92] = x.EvictionHard != "" + yyq130[93] = x.EvictionSoft != "" + yyq130[94] = x.EvictionSoftGracePeriod != "" + yyq130[95] = true + yyq130[96] = x.EvictionMaxPodGracePeriod != 0 + yyq130[97] = x.EvictionMinimumReclaim != "" + yyq130[106] = len(x.AllowedUnsafeSysctls) != 0 var yynn130 int if yyr130 || yy2arr130 { - r.EncodeArrayStart(105) + r.EncodeArrayStart(107) } else { - yynn130 = 83 + yynn130 = 85 for _, b := range yyq130 { if b { yynn130++ @@ -2312,24 +2312,62 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { _ = yym298 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(x.VolumePluginDir)) + r.EncodeString(codecSelferC_UTF81234, string(x.CNIConfDir)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("volumePluginDir")) + r.EncodeString(codecSelferC_UTF81234, string("cniConfDir")) z.EncSendContainerState(codecSelfer_containerMapValue1234) yym299 := z.EncBinary() _ = yym299 if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.CNIConfDir)) + } + } + if yyr130 || yy2arr130 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym301 := z.EncBinary() + _ = yym301 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.CNIBinDir)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("cniBinDir")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym302 := z.EncBinary() + _ = yym302 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.CNIBinDir)) + } + } + if yyr130 || yy2arr130 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym304 := z.EncBinary() + _ = yym304 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.VolumePluginDir)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("volumePluginDir")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym305 := z.EncBinary() + _ = yym305 + if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.VolumePluginDir)) } } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[51] { - yym301 := z.EncBinary() - _ = yym301 + if yyq130[53] { + yym307 := z.EncBinary() + _ = yym307 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) @@ -2338,12 +2376,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq130[51] { + if yyq130[53] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cloudProvider")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym302 := z.EncBinary() - _ = yym302 + yym308 := z.EncBinary() + _ = yym308 if false { } else { 
r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) @@ -2352,9 +2390,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[52] { - yym304 := z.EncBinary() - _ = yym304 + if yyq130[54] { + yym310 := z.EncBinary() + _ = yym310 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) @@ -2363,12 +2401,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq130[52] { + if yyq130[54] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cloudConfigFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym305 := z.EncBinary() - _ = yym305 + yym311 := z.EncBinary() + _ = yym311 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) @@ -2377,9 +2415,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[53] { - yym307 := z.EncBinary() - _ = yym307 + if yyq130[55] { + yym313 := z.EncBinary() + _ = yym313 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.KubeletCgroups)) @@ -2388,12 +2426,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq130[53] { + if yyq130[55] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeletCgroups")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym308 := z.EncBinary() - _ = yym308 + yym314 := z.EncBinary() + _ = yym314 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.KubeletCgroups)) @@ -2402,9 +2440,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[54] { - yym310 := z.EncBinary() - _ = yym310 + if yyq130[56] { + yym316 := z.EncBinary() + _ = yym316 if false { } else { r.EncodeBool(bool(x.CgroupsPerQOS)) @@ -2413,12 +2451,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeBool(false) } } else { - if yyq130[54] { + if yyq130[56] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("CgroupsPerQOS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym311 := z.EncBinary() - _ = yym311 + yym317 := z.EncBinary() + _ = yym317 if false { } else { r.EncodeBool(bool(x.CgroupsPerQOS)) @@ -2427,9 +2465,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[55] { - yym313 := z.EncBinary() - _ = yym313 + if yyq130[57] { + yym319 := z.EncBinary() + _ = yym319 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RuntimeCgroups)) @@ -2438,12 +2476,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq130[55] { + if yyq130[57] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("runtimeCgroups")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym314 := z.EncBinary() - _ = yym314 + yym320 := z.EncBinary() + _ = yym320 if false { } else { 
r.EncodeString(codecSelferC_UTF81234, string(x.RuntimeCgroups)) @@ -2452,9 +2490,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[56] { - yym316 := z.EncBinary() - _ = yym316 + if yyq130[58] { + yym322 := z.EncBinary() + _ = yym322 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SystemCgroups)) @@ -2463,12 +2501,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq130[56] { + if yyq130[58] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("systemCgroups")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym317 := z.EncBinary() - _ = yym317 + yym323 := z.EncBinary() + _ = yym323 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SystemCgroups)) @@ -2477,9 +2515,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[57] { - yym319 := z.EncBinary() - _ = yym319 + if yyq130[59] { + yym325 := z.EncBinary() + _ = yym325 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CgroupRoot)) @@ -2488,12 +2526,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq130[57] { + if yyq130[59] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cgroupRoot")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym320 := z.EncBinary() - _ = yym320 + yym326 := z.EncBinary() + _ = yym326 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CgroupRoot)) @@ -2502,8 +2540,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym322 := z.EncBinary() - _ = yym322 + yym328 := z.EncBinary() + _ = yym328 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntime)) @@ -2512,8 +2550,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("containerRuntime")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym323 := z.EncBinary() - _ = yym323 + yym329 := z.EncBinary() + _ = yym329 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContainerRuntime)) @@ -2521,8 +2559,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym325 := z.EncBinary() - _ = yym325 + yym331 := z.EncBinary() + _ = yym331 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RemoteRuntimeEndpoint)) @@ -2531,8 +2569,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("remoteRuntimeEndpoint")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym326 := z.EncBinary() - _ = yym326 + yym332 := z.EncBinary() + _ = yym332 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RemoteRuntimeEndpoint)) @@ -2540,8 +2578,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || 
yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym328 := z.EncBinary() - _ = yym328 + yym334 := z.EncBinary() + _ = yym334 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RemoteImageEndpoint)) @@ -2550,8 +2588,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("remoteImageEndpoint")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym329 := z.EncBinary() - _ = yym329 + yym335 := z.EncBinary() + _ = yym335 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RemoteImageEndpoint)) @@ -2559,42 +2597,42 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[61] { - yy331 := &x.RuntimeRequestTimeout - yym332 := z.EncBinary() - _ = yym332 + if yyq130[63] { + yy337 := &x.RuntimeRequestTimeout + yym338 := z.EncBinary() + _ = yym338 if false { - } else if z.HasExtensions() && z.EncExt(yy331) { - } else if !yym332 && z.IsJSONHandle() { - z.EncJSONMarshal(yy331) + } else if z.HasExtensions() && z.EncExt(yy337) { + } else if !yym338 && z.IsJSONHandle() { + z.EncJSONMarshal(yy337) } else { - z.EncFallback(yy331) + z.EncFallback(yy337) } } else { r.EncodeNil() } } else { - if yyq130[61] { + if yyq130[63] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("runtimeRequestTimeout")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy333 := &x.RuntimeRequestTimeout - yym334 := z.EncBinary() - _ = yym334 + yy339 := &x.RuntimeRequestTimeout + yym340 := z.EncBinary() + _ = yym340 if false { - } else if z.HasExtensions() && z.EncExt(yy333) { - } else if !yym334 && z.IsJSONHandle() { - z.EncJSONMarshal(yy333) + } else if z.HasExtensions() && z.EncExt(yy339) { + } else if !yym340 && z.IsJSONHandle() { + z.EncJSONMarshal(yy339) } else { - z.EncFallback(yy333) + z.EncFallback(yy339) } } } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[62] { - yym336 := z.EncBinary() - _ = yym336 + if yyq130[64] { + yym342 := z.EncBinary() + _ = yym342 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RktPath)) @@ -2603,12 +2641,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq130[62] { + if yyq130[64] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("rktPath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym337 := z.EncBinary() - _ = yym337 + yym343 := z.EncBinary() + _ = yym343 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RktPath)) @@ -2617,9 +2655,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[63] { - yym339 := z.EncBinary() - _ = yym339 + if yyq130[65] { + yym345 := z.EncBinary() + _ = yym345 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RktAPIEndpoint)) @@ -2628,12 +2666,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq130[63] { + if yyq130[65] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("rktAPIEndpoint")) 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym340 := z.EncBinary() - _ = yym340 + yym346 := z.EncBinary() + _ = yym346 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RktAPIEndpoint)) @@ -2642,9 +2680,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[64] { - yym342 := z.EncBinary() - _ = yym342 + if yyq130[66] { + yym348 := z.EncBinary() + _ = yym348 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RktStage1Image)) @@ -2653,12 +2691,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq130[64] { + if yyq130[66] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("rktStage1Image")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym343 := z.EncBinary() - _ = yym343 + yym349 := z.EncBinary() + _ = yym349 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RktStage1Image)) @@ -2667,8 +2705,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym345 := z.EncBinary() - _ = yym345 + yym351 := z.EncBinary() + _ = yym351 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.LockFilePath)) @@ -2677,8 +2715,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lockFilePath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym346 := z.EncBinary() - _ = yym346 + yym352 := z.EncBinary() + _ = yym352 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.LockFilePath)) @@ -2686,8 +2724,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym348 := z.EncBinary() - _ = yym348 + yym354 := z.EncBinary() + _ = yym354 if false { } else { r.EncodeBool(bool(x.ExitOnLockContention)) @@ -2696,8 +2734,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("exitOnLockContention")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym349 := z.EncBinary() - _ = yym349 + yym355 := z.EncBinary() + _ = yym355 if false { } else { r.EncodeBool(bool(x.ExitOnLockContention)) @@ -2705,8 +2743,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym351 := z.EncBinary() - _ = yym351 + yym357 := z.EncBinary() + _ = yym357 if false { } else { r.EncodeBool(bool(x.ConfigureCBR0)) @@ -2715,8 +2753,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("configureCbr0")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym352 := z.EncBinary() - _ = yym352 + yym358 := z.EncBinary() + _ = yym358 if false { } else { r.EncodeBool(bool(x.ConfigureCBR0)) @@ -2724,8 +2762,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym354 := 
z.EncBinary() - _ = yym354 + yym360 := z.EncBinary() + _ = yym360 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.HairpinMode)) @@ -2734,8 +2772,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hairpinMode")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym355 := z.EncBinary() - _ = yym355 + yym361 := z.EncBinary() + _ = yym361 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.HairpinMode)) @@ -2743,8 +2781,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym357 := z.EncBinary() - _ = yym357 + yym363 := z.EncBinary() + _ = yym363 if false { } else { r.EncodeBool(bool(x.BabysitDaemons)) @@ -2753,8 +2791,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("babysitDaemons")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym358 := z.EncBinary() - _ = yym358 + yym364 := z.EncBinary() + _ = yym364 if false { } else { r.EncodeBool(bool(x.BabysitDaemons)) @@ -2762,8 +2800,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym360 := z.EncBinary() - _ = yym360 + yym366 := z.EncBinary() + _ = yym366 if false { } else { r.EncodeInt(int64(x.MaxPods)) @@ -2772,8 +2810,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("maxPods")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym361 := z.EncBinary() - _ = yym361 + yym367 := z.EncBinary() + _ = yym367 if false { } else { r.EncodeInt(int64(x.MaxPods)) @@ -2781,8 +2819,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym363 := z.EncBinary() - _ = yym363 + yym369 := z.EncBinary() + _ = yym369 if false { } else { r.EncodeInt(int64(x.NvidiaGPUs)) @@ -2791,8 +2829,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nvidiaGPUs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym364 := z.EncBinary() - _ = yym364 + yym370 := z.EncBinary() + _ = yym370 if false { } else { r.EncodeInt(int64(x.NvidiaGPUs)) @@ -2800,8 +2838,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym366 := z.EncBinary() - _ = yym366 + yym372 := z.EncBinary() + _ = yym372 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DockerExecHandlerName)) @@ -2810,8 +2848,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("dockerExecHandlerName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym367 := z.EncBinary() - _ = yym367 + yym373 := z.EncBinary() + _ = yym373 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.DockerExecHandlerName)) @@ -2819,8 +2857,8 @@ func (x 
*KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym369 := z.EncBinary() - _ = yym369 + yym375 := z.EncBinary() + _ = yym375 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) @@ -2829,8 +2867,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podCIDR")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym370 := z.EncBinary() - _ = yym370 + yym376 := z.EncBinary() + _ = yym376 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PodCIDR)) @@ -2838,8 +2876,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym372 := z.EncBinary() - _ = yym372 + yym378 := z.EncBinary() + _ = yym378 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ResolverConfig)) @@ -2848,8 +2886,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("resolvConf")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym373 := z.EncBinary() - _ = yym373 + yym379 := z.EncBinary() + _ = yym379 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ResolverConfig)) @@ -2857,8 +2895,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym375 := z.EncBinary() - _ = yym375 + yym381 := z.EncBinary() + _ = yym381 if false { } else { r.EncodeBool(bool(x.CPUCFSQuota)) @@ -2867,8 +2905,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cpuCFSQuota")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym376 := z.EncBinary() - _ = yym376 + yym382 := z.EncBinary() + _ = yym382 if false { } else { r.EncodeBool(bool(x.CPUCFSQuota)) @@ -2876,8 +2914,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym378 := z.EncBinary() - _ = yym378 + yym384 := z.EncBinary() + _ = yym384 if false { } else { r.EncodeBool(bool(x.Containerized)) @@ -2886,8 +2924,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("containerized")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym379 := z.EncBinary() - _ = yym379 + yym385 := z.EncBinary() + _ = yym385 if false { } else { r.EncodeBool(bool(x.Containerized)) @@ -2895,8 +2933,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym381 := z.EncBinary() - _ = yym381 + yym387 := z.EncBinary() + _ = yym387 if false { } else { r.EncodeInt(int64(x.MaxOpenFiles)) @@ -2905,8 +2943,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("maxOpenFiles")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym382 := z.EncBinary() - _ = 
yym382 + yym388 := z.EncBinary() + _ = yym388 if false { } else { r.EncodeInt(int64(x.MaxOpenFiles)) @@ -2914,8 +2952,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym384 := z.EncBinary() - _ = yym384 + yym390 := z.EncBinary() + _ = yym390 if false { } else { r.EncodeBool(bool(x.ReconcileCIDR)) @@ -2924,8 +2962,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("reconcileCIDR")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym385 := z.EncBinary() - _ = yym385 + yym391 := z.EncBinary() + _ = yym391 if false { } else { r.EncodeBool(bool(x.ReconcileCIDR)) @@ -2933,8 +2971,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym387 := z.EncBinary() - _ = yym387 + yym393 := z.EncBinary() + _ = yym393 if false { } else { r.EncodeBool(bool(x.RegisterSchedulable)) @@ -2943,8 +2981,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("registerSchedulable")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym388 := z.EncBinary() - _ = yym388 + yym394 := z.EncBinary() + _ = yym394 if false { } else { r.EncodeBool(bool(x.RegisterSchedulable)) @@ -2952,8 +2990,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym390 := z.EncBinary() - _ = yym390 + yym396 := z.EncBinary() + _ = yym396 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) @@ -2962,8 +3000,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("contentType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym391 := z.EncBinary() - _ = yym391 + yym397 := z.EncBinary() + _ = yym397 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) @@ -2971,8 +3009,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym393 := z.EncBinary() - _ = yym393 + yym399 := z.EncBinary() + _ = yym399 if false { } else { r.EncodeInt(int64(x.KubeAPIQPS)) @@ -2981,8 +3019,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym394 := z.EncBinary() - _ = yym394 + yym400 := z.EncBinary() + _ = yym400 if false { } else { r.EncodeInt(int64(x.KubeAPIQPS)) @@ -2990,8 +3028,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym396 := z.EncBinary() - _ = yym396 + yym402 := z.EncBinary() + _ = yym402 if false { } else { r.EncodeInt(int64(x.KubeAPIBurst)) @@ -3000,8 +3038,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, 
string("kubeAPIBurst")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym397 := z.EncBinary() - _ = yym397 + yym403 := z.EncBinary() + _ = yym403 if false { } else { r.EncodeInt(int64(x.KubeAPIBurst)) @@ -3009,8 +3047,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym399 := z.EncBinary() - _ = yym399 + yym405 := z.EncBinary() + _ = yym405 if false { } else { r.EncodeBool(bool(x.SerializeImagePulls)) @@ -3019,8 +3057,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("serializeImagePulls")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym400 := z.EncBinary() - _ = yym400 + yym406 := z.EncBinary() + _ = yym406 if false { } else { r.EncodeBool(bool(x.SerializeImagePulls)) @@ -3028,8 +3066,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym402 := z.EncBinary() - _ = yym402 + yym408 := z.EncBinary() + _ = yym408 if false { } else { r.EncodeBool(bool(x.ExperimentalFlannelOverlay)) @@ -3038,8 +3076,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("experimentalFlannelOverlay")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym403 := z.EncBinary() - _ = yym403 + yym409 := z.EncBinary() + _ = yym409 if false { } else { r.EncodeBool(bool(x.ExperimentalFlannelOverlay)) @@ -3047,42 +3085,42 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[85] { - yy405 := &x.OutOfDiskTransitionFrequency - yym406 := z.EncBinary() - _ = yym406 + if yyq130[87] { + yy411 := &x.OutOfDiskTransitionFrequency + yym412 := z.EncBinary() + _ = yym412 if false { - } else if z.HasExtensions() && z.EncExt(yy405) { - } else if !yym406 && z.IsJSONHandle() { - z.EncJSONMarshal(yy405) + } else if z.HasExtensions() && z.EncExt(yy411) { + } else if !yym412 && z.IsJSONHandle() { + z.EncJSONMarshal(yy411) } else { - z.EncFallback(yy405) + z.EncFallback(yy411) } } else { r.EncodeNil() } } else { - if yyq130[85] { + if yyq130[87] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("outOfDiskTransitionFrequency")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy407 := &x.OutOfDiskTransitionFrequency - yym408 := z.EncBinary() - _ = yym408 + yy413 := &x.OutOfDiskTransitionFrequency + yym414 := z.EncBinary() + _ = yym414 if false { - } else if z.HasExtensions() && z.EncExt(yy407) { - } else if !yym408 && z.IsJSONHandle() { - z.EncJSONMarshal(yy407) + } else if z.HasExtensions() && z.EncExt(yy413) { + } else if !yym414 && z.IsJSONHandle() { + z.EncJSONMarshal(yy413) } else { - z.EncFallback(yy407) + z.EncFallback(yy413) } } } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[86] { - yym410 := z.EncBinary() - _ = yym410 + if yyq130[88] { + yym416 := z.EncBinary() + _ = yym416 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NodeIP)) @@ -3091,12 +3129,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } 
else { - if yyq130[86] { + if yyq130[88] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodeIP")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym411 := z.EncBinary() - _ = yym411 + yym417 := z.EncBinary() + _ = yym417 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NodeIP)) @@ -3108,8 +3146,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x.NodeLabels == nil { r.EncodeNil() } else { - yym413 := z.EncBinary() - _ = yym413 + yym419 := z.EncBinary() + _ = yym419 if false { } else { z.F.EncMapStringStringV(x.NodeLabels, false, e) @@ -3122,8 +3160,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x.NodeLabels == nil { r.EncodeNil() } else { - yym414 := z.EncBinary() - _ = yym414 + yym420 := z.EncBinary() + _ = yym420 if false { } else { z.F.EncMapStringStringV(x.NodeLabels, false, e) @@ -3132,8 +3170,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym416 := z.EncBinary() - _ = yym416 + yym422 := z.EncBinary() + _ = yym422 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NonMasqueradeCIDR)) @@ -3142,8 +3180,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nonMasqueradeCIDR")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym417 := z.EncBinary() - _ = yym417 + yym423 := z.EncBinary() + _ = yym423 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.NonMasqueradeCIDR)) @@ -3151,8 +3189,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym419 := z.EncBinary() - _ = yym419 + yym425 := z.EncBinary() + _ = yym425 if false { } else { r.EncodeBool(bool(x.EnableCustomMetrics)) @@ -3161,8 +3199,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("enableCustomMetrics")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym420 := z.EncBinary() - _ = yym420 + yym426 := z.EncBinary() + _ = yym426 if false { } else { r.EncodeBool(bool(x.EnableCustomMetrics)) @@ -3170,9 +3208,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[90] { - yym422 := z.EncBinary() - _ = yym422 + if yyq130[92] { + yym428 := z.EncBinary() + _ = yym428 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.EvictionHard)) @@ -3181,12 +3219,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq130[90] { + if yyq130[92] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("evictionHard")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym423 := z.EncBinary() - _ = yym423 + yym429 := z.EncBinary() + _ = yym429 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.EvictionHard)) @@ -3195,9 +3233,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq130[91] { - yym425 := z.EncBinary() - _ = yym425 + if yyq130[93] { + yym431 := z.EncBinary() + _ = yym431 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoft)) @@ -3206,12 +3244,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq130[91] { + if yyq130[93] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("evictionSoft")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym426 := z.EncBinary() - _ = yym426 + yym432 := z.EncBinary() + _ = yym432 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoft)) @@ -3220,9 +3258,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[92] { - yym428 := z.EncBinary() - _ = yym428 + if yyq130[94] { + yym434 := z.EncBinary() + _ = yym434 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoftGracePeriod)) @@ -3231,12 +3269,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq130[92] { + if yyq130[94] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("evictionSoftGracePeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym429 := z.EncBinary() - _ = yym429 + yym435 := z.EncBinary() + _ = yym435 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.EvictionSoftGracePeriod)) @@ -3245,42 +3283,42 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[93] { - yy431 := &x.EvictionPressureTransitionPeriod - yym432 := z.EncBinary() - _ = yym432 + if yyq130[95] { + yy437 := &x.EvictionPressureTransitionPeriod + yym438 := z.EncBinary() + _ = yym438 if false { - } else if z.HasExtensions() && z.EncExt(yy431) { - } else if !yym432 && z.IsJSONHandle() { - z.EncJSONMarshal(yy431) + } else if z.HasExtensions() && z.EncExt(yy437) { + } else if !yym438 && z.IsJSONHandle() { + z.EncJSONMarshal(yy437) } else { - z.EncFallback(yy431) + z.EncFallback(yy437) } } else { r.EncodeNil() } } else { - if yyq130[93] { + if yyq130[95] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("evictionPressureTransitionPeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy433 := &x.EvictionPressureTransitionPeriod - yym434 := z.EncBinary() - _ = yym434 + yy439 := &x.EvictionPressureTransitionPeriod + yym440 := z.EncBinary() + _ = yym440 if false { - } else if z.HasExtensions() && z.EncExt(yy433) { - } else if !yym434 && z.IsJSONHandle() { - z.EncJSONMarshal(yy433) + } else if z.HasExtensions() && z.EncExt(yy439) { + } else if !yym440 && z.IsJSONHandle() { + z.EncJSONMarshal(yy439) } else { - z.EncFallback(yy433) + z.EncFallback(yy439) } } } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[94] { - yym436 := z.EncBinary() - _ = yym436 + if yyq130[96] { + yym442 := z.EncBinary() + _ = yym442 if false { } else { r.EncodeInt(int64(x.EvictionMaxPodGracePeriod)) @@ -3289,12 +3327,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeInt(0) } } else { - if yyq130[94] { + if yyq130[96] { 
z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("evictionMaxPodGracePeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym437 := z.EncBinary() - _ = yym437 + yym443 := z.EncBinary() + _ = yym443 if false { } else { r.EncodeInt(int64(x.EvictionMaxPodGracePeriod)) @@ -3303,9 +3341,9 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq130[95] { - yym439 := z.EncBinary() - _ = yym439 + if yyq130[97] { + yym445 := z.EncBinary() + _ = yym445 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.EvictionMinimumReclaim)) @@ -3314,12 +3352,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq130[95] { + if yyq130[97] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("evictionMinimumReclaim")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym440 := z.EncBinary() - _ = yym440 + yym446 := z.EncBinary() + _ = yym446 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.EvictionMinimumReclaim)) @@ -3328,8 +3366,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym442 := z.EncBinary() - _ = yym442 + yym448 := z.EncBinary() + _ = yym448 if false { } else { r.EncodeInt(int64(x.PodsPerCore)) @@ -3338,8 +3376,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podsPerCore")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym443 := z.EncBinary() - _ = yym443 + yym449 := z.EncBinary() + _ = yym449 if false { } else { r.EncodeInt(int64(x.PodsPerCore)) @@ -3347,8 +3385,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym445 := z.EncBinary() - _ = yym445 + yym451 := z.EncBinary() + _ = yym451 if false { } else { r.EncodeBool(bool(x.EnableControllerAttachDetach)) @@ -3357,8 +3395,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("enableControllerAttachDetach")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym446 := z.EncBinary() - _ = yym446 + yym452 := z.EncBinary() + _ = yym452 if false { } else { r.EncodeBool(bool(x.EnableControllerAttachDetach)) @@ -3369,8 +3407,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x.SystemReserved == nil { r.EncodeNil() } else { - yym448 := z.EncBinary() - _ = yym448 + yym454 := z.EncBinary() + _ = yym454 if false { } else if z.HasExtensions() && z.EncExt(x.SystemReserved) { } else { @@ -3384,8 +3422,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x.SystemReserved == nil { r.EncodeNil() } else { - yym449 := z.EncBinary() - _ = yym449 + yym455 := z.EncBinary() + _ = yym455 if false { } else if z.HasExtensions() && z.EncExt(x.SystemReserved) { } else { @@ -3398,8 +3436,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x.KubeReserved == nil { r.EncodeNil() } else { - yym451 := z.EncBinary() - _ = yym451 + yym457 := 
z.EncBinary() + _ = yym457 if false { } else if z.HasExtensions() && z.EncExt(x.KubeReserved) { } else { @@ -3413,8 +3451,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x.KubeReserved == nil { r.EncodeNil() } else { - yym452 := z.EncBinary() - _ = yym452 + yym458 := z.EncBinary() + _ = yym458 if false { } else if z.HasExtensions() && z.EncExt(x.KubeReserved) { } else { @@ -3424,8 +3462,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym454 := z.EncBinary() - _ = yym454 + yym460 := z.EncBinary() + _ = yym460 if false { } else { r.EncodeBool(bool(x.ProtectKernelDefaults)) @@ -3434,8 +3472,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("protectKernelDefaults")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym455 := z.EncBinary() - _ = yym455 + yym461 := z.EncBinary() + _ = yym461 if false { } else { r.EncodeBool(bool(x.ProtectKernelDefaults)) @@ -3443,8 +3481,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym457 := z.EncBinary() - _ = yym457 + yym463 := z.EncBinary() + _ = yym463 if false { } else { r.EncodeBool(bool(x.MakeIPTablesUtilChains)) @@ -3453,8 +3491,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("makeIPTablesUtilChains")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym458 := z.EncBinary() - _ = yym458 + yym464 := z.EncBinary() + _ = yym464 if false { } else { r.EncodeBool(bool(x.MakeIPTablesUtilChains)) @@ -3462,8 +3500,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym460 := z.EncBinary() - _ = yym460 + yym466 := z.EncBinary() + _ = yym466 if false { } else { r.EncodeInt(int64(x.IPTablesMasqueradeBit)) @@ -3472,8 +3510,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("iptablesMasqueradeBit")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym461 := z.EncBinary() - _ = yym461 + yym467 := z.EncBinary() + _ = yym467 if false { } else { r.EncodeInt(int64(x.IPTablesMasqueradeBit)) @@ -3481,8 +3519,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym463 := z.EncBinary() - _ = yym463 + yym469 := z.EncBinary() + _ = yym469 if false { } else { r.EncodeInt(int64(x.IPTablesDropBit)) @@ -3491,8 +3529,8 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("iptablesDropBit")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym464 := z.EncBinary() - _ = yym464 + yym470 := z.EncBinary() + _ = yym470 if false { } else { r.EncodeInt(int64(x.IPTablesDropBit)) @@ -3500,12 +3538,12 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { } if yyr130 || yy2arr130 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if 
yyq130[104] { + if yyq130[106] { if x.AllowedUnsafeSysctls == nil { r.EncodeNil() } else { - yym466 := z.EncBinary() - _ = yym466 + yym472 := z.EncBinary() + _ = yym472 if false { } else { z.F.EncSliceStringV(x.AllowedUnsafeSysctls, false, e) @@ -3515,15 +3553,15 @@ func (x *KubeletConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeNil() } } else { - if yyq130[104] { + if yyq130[106] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("experimentalAllowedUnsafeSysctls")) z.EncSendContainerState(codecSelfer_containerMapValue1234) if x.AllowedUnsafeSysctls == nil { r.EncodeNil() } else { - yym467 := z.EncBinary() - _ = yym467 + yym473 := z.EncBinary() + _ = yym473 if false { } else { z.F.EncSliceStringV(x.AllowedUnsafeSysctls, false, e) @@ -3544,25 +3582,25 @@ func (x *KubeletConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym468 := z.DecBinary() - _ = yym468 + yym474 := z.DecBinary() + _ = yym474 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct469 := r.ContainerType() - if yyct469 == codecSelferValueTypeMap1234 { - yyl469 := r.ReadMapStart() - if yyl469 == 0 { + yyct475 := r.ContainerType() + if yyct475 == codecSelferValueTypeMap1234 { + yyl475 := r.ReadMapStart() + if yyl475 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl469, d) + x.codecDecodeSelfFromMap(yyl475, d) } - } else if yyct469 == codecSelferValueTypeArray1234 { - yyl469 := r.ReadArrayStart() - if yyl469 == 0 { + } else if yyct475 == codecSelferValueTypeArray1234 { + yyl475 := r.ReadArrayStart() + if yyl475 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl469, d) + x.codecDecodeSelfFromArray(yyl475, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -3574,12 +3612,12 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys470Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys470Slc - var yyhl470 bool = l >= 0 - for yyj470 := 0; ; yyj470++ { - if yyhl470 { - if yyj470 >= l { + var yys476Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys476Slc + var yyhl476 bool = l >= 0 + for yyj476 := 0; ; yyj476++ { + if yyhl476 { + if yyj476 >= l { break } } else { @@ -3588,10 +3626,10 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys470Slc = r.DecodeBytes(yys470Slc, true, true) - yys470 := string(yys470Slc) + yys476Slc = r.DecodeBytes(yys476Slc, true, true) + yys476 := string(yys476Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys470 { + switch yys476 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" @@ -3614,45 +3652,45 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.SyncFrequency = pkg1_unversioned.Duration{} } else { - yyv474 := &x.SyncFrequency - yym475 := z.DecBinary() - _ = yym475 + yyv480 := &x.SyncFrequency + yym481 := z.DecBinary() + _ = yym481 if false { - } else if z.HasExtensions() && z.DecExt(yyv474) { - } else if !yym475 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv474) + } else if z.HasExtensions() && z.DecExt(yyv480) { + } else if !yym481 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv480) } 
else { - z.DecFallback(yyv474, false) + z.DecFallback(yyv480, false) } } case "fileCheckFrequency": if r.TryDecodeAsNil() { x.FileCheckFrequency = pkg1_unversioned.Duration{} } else { - yyv476 := &x.FileCheckFrequency - yym477 := z.DecBinary() - _ = yym477 + yyv482 := &x.FileCheckFrequency + yym483 := z.DecBinary() + _ = yym483 if false { - } else if z.HasExtensions() && z.DecExt(yyv476) { - } else if !yym477 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv476) + } else if z.HasExtensions() && z.DecExt(yyv482) { + } else if !yym483 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv482) } else { - z.DecFallback(yyv476, false) + z.DecFallback(yyv482, false) } } case "httpCheckFrequency": if r.TryDecodeAsNil() { x.HTTPCheckFrequency = pkg1_unversioned.Duration{} } else { - yyv478 := &x.HTTPCheckFrequency - yym479 := z.DecBinary() - _ = yym479 + yyv484 := &x.HTTPCheckFrequency + yym485 := z.DecBinary() + _ = yym485 if false { - } else if z.HasExtensions() && z.DecExt(yyv478) { - } else if !yym479 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv478) + } else if z.HasExtensions() && z.DecExt(yyv484) { + } else if !yym485 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv484) } else { - z.DecFallback(yyv478, false) + z.DecFallback(yyv484, false) } } case "manifestURL": @@ -3749,36 +3787,36 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.HostNetworkSources = nil } else { - yyv495 := &x.HostNetworkSources - yym496 := z.DecBinary() - _ = yym496 + yyv501 := &x.HostNetworkSources + yym502 := z.DecBinary() + _ = yym502 if false { } else { - z.F.DecSliceStringX(yyv495, false, d) + z.F.DecSliceStringX(yyv501, false, d) } } case "hostPIDSources": if r.TryDecodeAsNil() { x.HostPIDSources = nil } else { - yyv497 := &x.HostPIDSources - yym498 := z.DecBinary() - _ = yym498 + yyv503 := &x.HostPIDSources + yym504 := z.DecBinary() + _ = yym504 if false { } else { - z.F.DecSliceStringX(yyv497, false, d) + z.F.DecSliceStringX(yyv503, false, d) } } case "hostIPCSources": if r.TryDecodeAsNil() { x.HostIPCSources = nil } else { - yyv499 := &x.HostIPCSources - yym500 := z.DecBinary() - _ = yym500 + yyv505 := &x.HostIPCSources + yym506 := z.DecBinary() + _ = yym506 if false { } else { - z.F.DecSliceStringX(yyv499, false, d) + z.F.DecSliceStringX(yyv505, false, d) } } case "registryPullQPS": @@ -3815,15 +3853,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.MinimumGCAge = pkg1_unversioned.Duration{} } else { - yyv506 := &x.MinimumGCAge - yym507 := z.DecBinary() - _ = yym507 + yyv512 := &x.MinimumGCAge + yym513 := z.DecBinary() + _ = yym513 if false { - } else if z.HasExtensions() && z.DecExt(yyv506) { - } else if !yym507 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv506) + } else if z.HasExtensions() && z.DecExt(yyv512) { + } else if !yym513 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv512) } else { - z.DecFallback(yyv506, false) + z.DecFallback(yyv512, false) } } case "maxPerPodContainerCount": @@ -3890,45 +3928,45 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.StreamingConnectionIdleTimeout = pkg1_unversioned.Duration{} } else { - yyv518 := &x.StreamingConnectionIdleTimeout - yym519 := z.DecBinary() - _ = yym519 + yyv524 := &x.StreamingConnectionIdleTimeout + yym525 := z.DecBinary() + _ = yym525 if false { - } else if z.HasExtensions() && z.DecExt(yyv518) { - } else if !yym519 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv518) + } else if 
z.HasExtensions() && z.DecExt(yyv524) { + } else if !yym525 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv524) } else { - z.DecFallback(yyv518, false) + z.DecFallback(yyv524, false) } } case "nodeStatusUpdateFrequency": if r.TryDecodeAsNil() { x.NodeStatusUpdateFrequency = pkg1_unversioned.Duration{} } else { - yyv520 := &x.NodeStatusUpdateFrequency - yym521 := z.DecBinary() - _ = yym521 + yyv526 := &x.NodeStatusUpdateFrequency + yym527 := z.DecBinary() + _ = yym527 if false { - } else if z.HasExtensions() && z.DecExt(yyv520) { - } else if !yym521 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv520) + } else if z.HasExtensions() && z.DecExt(yyv526) { + } else if !yym527 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv526) } else { - z.DecFallback(yyv520, false) + z.DecFallback(yyv526, false) } } case "imageMinimumGCAge": if r.TryDecodeAsNil() { x.ImageMinimumGCAge = pkg1_unversioned.Duration{} } else { - yyv522 := &x.ImageMinimumGCAge - yym523 := z.DecBinary() - _ = yym523 + yyv528 := &x.ImageMinimumGCAge + yym529 := z.DecBinary() + _ = yym529 if false { - } else if z.HasExtensions() && z.DecExt(yyv522) { - } else if !yym523 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv522) + } else if z.HasExtensions() && z.DecExt(yyv528) { + } else if !yym529 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv528) } else { - z.DecFallback(yyv522, false) + z.DecFallback(yyv528, false) } } case "imageGCHighThresholdPercent": @@ -3953,15 +3991,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.VolumeStatsAggPeriod = pkg1_unversioned.Duration{} } else { - yyv527 := &x.VolumeStatsAggPeriod - yym528 := z.DecBinary() - _ = yym528 + yyv533 := &x.VolumeStatsAggPeriod + yym534 := z.DecBinary() + _ = yym534 if false { - } else if z.HasExtensions() && z.DecExt(yyv527) { - } else if !yym528 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv527) + } else if z.HasExtensions() && z.DecExt(yyv533) { + } else if !yym534 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv533) } else { - z.DecFallback(yyv527, false) + z.DecFallback(yyv533, false) } } case "networkPluginName": @@ -3982,6 +4020,18 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode } else { x.NetworkPluginDir = string(r.DecodeString()) } + case "cniConfDir": + if r.TryDecodeAsNil() { + x.CNIConfDir = "" + } else { + x.CNIConfDir = string(r.DecodeString()) + } + case "cniBinDir": + if r.TryDecodeAsNil() { + x.CNIBinDir = "" + } else { + x.CNIBinDir = string(r.DecodeString()) + } case "volumePluginDir": if r.TryDecodeAsNil() { x.VolumePluginDir = "" @@ -4052,15 +4102,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.RuntimeRequestTimeout = pkg1_unversioned.Duration{} } else { - yyv543 := &x.RuntimeRequestTimeout - yym544 := z.DecBinary() - _ = yym544 + yyv551 := &x.RuntimeRequestTimeout + yym552 := z.DecBinary() + _ = yym552 if false { - } else if z.HasExtensions() && z.DecExt(yyv543) { - } else if !yym544 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv543) + } else if z.HasExtensions() && z.DecExt(yyv551) { + } else if !yym552 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv551) } else { - z.DecFallback(yyv543, false) + z.DecFallback(yyv551, false) } } case "rktPath": @@ -4205,15 +4255,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.OutOfDiskTransitionFrequency = pkg1_unversioned.Duration{} } else { - yyv568 := &x.OutOfDiskTransitionFrequency - yym569 := 
z.DecBinary() - _ = yym569 + yyv576 := &x.OutOfDiskTransitionFrequency + yym577 := z.DecBinary() + _ = yym577 if false { - } else if z.HasExtensions() && z.DecExt(yyv568) { - } else if !yym569 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv568) + } else if z.HasExtensions() && z.DecExt(yyv576) { + } else if !yym577 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv576) } else { - z.DecFallback(yyv568, false) + z.DecFallback(yyv576, false) } } case "nodeIP": @@ -4226,12 +4276,12 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.NodeLabels = nil } else { - yyv571 := &x.NodeLabels - yym572 := z.DecBinary() - _ = yym572 + yyv579 := &x.NodeLabels + yym580 := z.DecBinary() + _ = yym580 if false { } else { - z.F.DecMapStringStringX(yyv571, false, d) + z.F.DecMapStringStringX(yyv579, false, d) } } case "nonMasqueradeCIDR": @@ -4268,15 +4318,15 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.EvictionPressureTransitionPeriod = pkg1_unversioned.Duration{} } else { - yyv578 := &x.EvictionPressureTransitionPeriod - yym579 := z.DecBinary() - _ = yym579 + yyv586 := &x.EvictionPressureTransitionPeriod + yym587 := z.DecBinary() + _ = yym587 if false { - } else if z.HasExtensions() && z.DecExt(yyv578) { - } else if !yym579 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv578) + } else if z.HasExtensions() && z.DecExt(yyv586) { + } else if !yym587 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv586) } else { - z.DecFallback(yyv578, false) + z.DecFallback(yyv586, false) } } case "evictionMaxPodGracePeriod": @@ -4307,26 +4357,26 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.SystemReserved = nil } else { - yyv584 := &x.SystemReserved - yym585 := z.DecBinary() - _ = yym585 + yyv592 := &x.SystemReserved + yym593 := z.DecBinary() + _ = yym593 if false { - } else if z.HasExtensions() && z.DecExt(yyv584) { + } else if z.HasExtensions() && z.DecExt(yyv592) { } else { - h.decconfig_ConfigurationMap((*pkg2_config.ConfigurationMap)(yyv584), d) + h.decconfig_ConfigurationMap((*pkg2_config.ConfigurationMap)(yyv592), d) } } case "kubeReserved": if r.TryDecodeAsNil() { x.KubeReserved = nil } else { - yyv586 := &x.KubeReserved - yym587 := z.DecBinary() - _ = yym587 + yyv594 := &x.KubeReserved + yym595 := z.DecBinary() + _ = yym595 if false { - } else if z.HasExtensions() && z.DecExt(yyv586) { + } else if z.HasExtensions() && z.DecExt(yyv594) { } else { - h.decconfig_ConfigurationMap((*pkg2_config.ConfigurationMap)(yyv586), d) + h.decconfig_ConfigurationMap((*pkg2_config.ConfigurationMap)(yyv594), d) } } case "protectKernelDefaults": @@ -4357,18 +4407,18 @@ func (x *KubeletConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decode if r.TryDecodeAsNil() { x.AllowedUnsafeSysctls = nil } else { - yyv592 := &x.AllowedUnsafeSysctls - yym593 := z.DecBinary() - _ = yym593 + yyv600 := &x.AllowedUnsafeSysctls + yym601 := z.DecBinary() + _ = yym601 if false { } else { - z.F.DecSliceStringX(yyv592, false, d) + z.F.DecSliceStringX(yyv600, false, d) } } default: - z.DecStructFieldNotFound(-1, yys470) - } // end switch yys470 - } // end for yyj470 + z.DecStructFieldNotFound(-1, yys476) + } // end switch yys476 + } // end for yyj476 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -4376,16 +4426,16 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = 
h, z, r - var yyj594 int - var yyb594 bool - var yyhl594 bool = l >= 0 - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + var yyj602 int + var yyb602 bool + var yyhl602 bool = l >= 0 + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4395,13 +4445,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.Kind = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4411,13 +4461,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.APIVersion = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4427,13 +4477,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.PodManifestPath = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4441,24 +4491,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.SyncFrequency = pkg1_unversioned.Duration{} } else { - yyv598 := &x.SyncFrequency - yym599 := z.DecBinary() - _ = yym599 + yyv606 := &x.SyncFrequency + yym607 := z.DecBinary() + _ = yym607 if false { - } else if z.HasExtensions() && z.DecExt(yyv598) { - } else if !yym599 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv598) + } else if z.HasExtensions() && z.DecExt(yyv606) { + } else if !yym607 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv606) } else { - z.DecFallback(yyv598, false) + z.DecFallback(yyv606, false) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4466,24 +4516,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.FileCheckFrequency = pkg1_unversioned.Duration{} } else { - yyv600 := &x.FileCheckFrequency - yym601 := z.DecBinary() - _ = yym601 + yyv608 := &x.FileCheckFrequency + yym609 := z.DecBinary() + _ = yym609 if false { - } else if z.HasExtensions() && z.DecExt(yyv600) { - } else if !yym601 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv600) + } else if z.HasExtensions() && z.DecExt(yyv608) { + } else if !yym609 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv608) } else { - z.DecFallback(yyv600, false) + z.DecFallback(yyv608, false) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4491,24 +4541,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco 
if r.TryDecodeAsNil() { x.HTTPCheckFrequency = pkg1_unversioned.Duration{} } else { - yyv602 := &x.HTTPCheckFrequency - yym603 := z.DecBinary() - _ = yym603 + yyv610 := &x.HTTPCheckFrequency + yym611 := z.DecBinary() + _ = yym611 if false { - } else if z.HasExtensions() && z.DecExt(yyv602) { - } else if !yym603 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv602) + } else if z.HasExtensions() && z.DecExt(yyv610) { + } else if !yym611 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv610) } else { - z.DecFallback(yyv602, false) + z.DecFallback(yyv610, false) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4518,13 +4568,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ManifestURL = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4534,13 +4584,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ManifestURLHeader = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4550,13 +4600,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EnableServer = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4566,13 +4616,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.Address = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4582,13 +4632,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.Port = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4598,13 +4648,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ReadOnlyPort = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4614,13 +4664,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.TLSCertFile = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = 
r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4630,13 +4680,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.TLSPrivateKeyFile = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4646,13 +4696,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.CertDirectory = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4662,13 +4712,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.HostnameOverride = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4678,13 +4728,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.PodInfraContainerImage = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4694,13 +4744,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.DockerEndpoint = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4710,13 +4760,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RootDirectory = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4726,13 +4776,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.SeccompProfileRoot = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4742,13 +4792,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.AllowPrivileged = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4756,21 +4806,21 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l 
int, d *codec1978.Deco if r.TryDecodeAsNil() { x.HostNetworkSources = nil } else { - yyv619 := &x.HostNetworkSources - yym620 := z.DecBinary() - _ = yym620 + yyv627 := &x.HostNetworkSources + yym628 := z.DecBinary() + _ = yym628 if false { } else { - z.F.DecSliceStringX(yyv619, false, d) + z.F.DecSliceStringX(yyv627, false, d) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4778,21 +4828,21 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.HostPIDSources = nil } else { - yyv621 := &x.HostPIDSources - yym622 := z.DecBinary() - _ = yym622 + yyv629 := &x.HostPIDSources + yym630 := z.DecBinary() + _ = yym630 if false { } else { - z.F.DecSliceStringX(yyv621, false, d) + z.F.DecSliceStringX(yyv629, false, d) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4800,21 +4850,21 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.HostIPCSources = nil } else { - yyv623 := &x.HostIPCSources - yym624 := z.DecBinary() - _ = yym624 + yyv631 := &x.HostIPCSources + yym632 := z.DecBinary() + _ = yym632 if false { } else { - z.F.DecSliceStringX(yyv623, false, d) + z.F.DecSliceStringX(yyv631, false, d) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4824,13 +4874,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RegistryPullQPS = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4840,13 +4890,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RegistryBurst = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4856,13 +4906,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EventRecordQPS = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4872,13 +4922,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EventBurst = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ 
-4888,13 +4938,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EnableDebuggingHandlers = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4902,24 +4952,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.MinimumGCAge = pkg1_unversioned.Duration{} } else { - yyv630 := &x.MinimumGCAge - yym631 := z.DecBinary() - _ = yym631 + yyv638 := &x.MinimumGCAge + yym639 := z.DecBinary() + _ = yym639 if false { - } else if z.HasExtensions() && z.DecExt(yyv630) { - } else if !yym631 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv630) + } else if z.HasExtensions() && z.DecExt(yyv638) { + } else if !yym639 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv638) } else { - z.DecFallback(yyv630, false) + z.DecFallback(yyv638, false) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4929,13 +4979,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.MaxPerPodContainerCount = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4945,13 +4995,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.MaxContainerCount = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4961,13 +5011,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.CAdvisorPort = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4977,13 +5027,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.HealthzPort = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -4993,13 +5043,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.HealthzBindAddress = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5009,13 +5059,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.OOMScoreAdj = int32(r.DecodeInt(32)) } 
- yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5025,13 +5075,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RegisterNode = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5041,13 +5091,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ClusterDomain = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5057,13 +5107,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.MasterServiceNamespace = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5073,13 +5123,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ClusterDNS = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5087,24 +5137,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.StreamingConnectionIdleTimeout = pkg1_unversioned.Duration{} } else { - yyv642 := &x.StreamingConnectionIdleTimeout - yym643 := z.DecBinary() - _ = yym643 + yyv650 := &x.StreamingConnectionIdleTimeout + yym651 := z.DecBinary() + _ = yym651 if false { - } else if z.HasExtensions() && z.DecExt(yyv642) { - } else if !yym643 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv642) + } else if z.HasExtensions() && z.DecExt(yyv650) { + } else if !yym651 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv650) } else { - z.DecFallback(yyv642, false) + z.DecFallback(yyv650, false) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5112,24 +5162,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.NodeStatusUpdateFrequency = pkg1_unversioned.Duration{} } else { - yyv644 := &x.NodeStatusUpdateFrequency - yym645 := z.DecBinary() - _ = yym645 + yyv652 := &x.NodeStatusUpdateFrequency + yym653 := z.DecBinary() + _ = yym653 if false { - } else if z.HasExtensions() && z.DecExt(yyv644) { - } else if !yym645 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv644) + } else if z.HasExtensions() && z.DecExt(yyv652) { + } else if !yym653 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv652) } else { - z.DecFallback(yyv644, false) + z.DecFallback(yyv652, false) 
} } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5137,24 +5187,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.ImageMinimumGCAge = pkg1_unversioned.Duration{} } else { - yyv646 := &x.ImageMinimumGCAge - yym647 := z.DecBinary() - _ = yym647 + yyv654 := &x.ImageMinimumGCAge + yym655 := z.DecBinary() + _ = yym655 if false { - } else if z.HasExtensions() && z.DecExt(yyv646) { - } else if !yym647 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv646) + } else if z.HasExtensions() && z.DecExt(yyv654) { + } else if !yym655 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv654) } else { - z.DecFallback(yyv646, false) + z.DecFallback(yyv654, false) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5164,13 +5214,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ImageGCHighThresholdPercent = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5180,13 +5230,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ImageGCLowThresholdPercent = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5196,13 +5246,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.LowDiskSpaceThresholdMB = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5210,24 +5260,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.VolumeStatsAggPeriod = pkg1_unversioned.Duration{} } else { - yyv651 := &x.VolumeStatsAggPeriod - yym652 := z.DecBinary() - _ = yym652 + yyv659 := &x.VolumeStatsAggPeriod + yym660 := z.DecBinary() + _ = yym660 if false { - } else if z.HasExtensions() && z.DecExt(yyv651) { - } else if !yym652 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv651) + } else if z.HasExtensions() && z.DecExt(yyv659) { + } else if !yym660 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv659) } else { - z.DecFallback(yyv651, false) + z.DecFallback(yyv659, false) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5237,13 +5287,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.NetworkPluginName = string(r.DecodeString()) } - yyj594++ - 
if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5253,13 +5303,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.NetworkPluginMTU = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5269,13 +5319,45 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.NetworkPluginDir = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CNIConfDir = "" + } else { + x.CNIConfDir = string(r.DecodeString()) + } + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l + } else { + yyb602 = r.CheckBreak() + } + if yyb602 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.CNIBinDir = "" + } else { + x.CNIBinDir = string(r.DecodeString()) + } + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l + } else { + yyb602 = r.CheckBreak() + } + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5285,13 +5367,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.VolumePluginDir = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5301,13 +5383,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.CloudProvider = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5317,13 +5399,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.CloudConfigFile = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5333,13 +5415,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.KubeletCgroups = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5349,13 +5431,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.CgroupsPerQOS = 
bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5365,13 +5447,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RuntimeCgroups = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5381,13 +5463,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.SystemCgroups = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5397,13 +5479,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.CgroupRoot = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5413,13 +5495,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ContainerRuntime = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5429,13 +5511,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RemoteRuntimeEndpoint = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5445,13 +5527,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RemoteImageEndpoint = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5459,24 +5541,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.RuntimeRequestTimeout = pkg1_unversioned.Duration{} } else { - yyv667 := &x.RuntimeRequestTimeout - yym668 := z.DecBinary() - _ = yym668 + yyv677 := &x.RuntimeRequestTimeout + yym678 := z.DecBinary() + _ = yym678 if false { - } else if z.HasExtensions() && z.DecExt(yyv667) { - } else if !yym668 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv667) + } else if z.HasExtensions() && z.DecExt(yyv677) { + } else if !yym678 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv677) } else { - z.DecFallback(yyv667, false) + z.DecFallback(yyv677, false) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } 
else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5486,13 +5568,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RktPath = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5502,13 +5584,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RktAPIEndpoint = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5518,13 +5600,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RktStage1Image = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5534,13 +5616,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.LockFilePath = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5550,13 +5632,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ExitOnLockContention = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5566,13 +5648,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ConfigureCBR0 = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5582,13 +5664,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.HairpinMode = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5598,13 +5680,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.BabysitDaemons = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5614,13 +5696,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d 
*codec1978.Deco } else { x.MaxPods = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5630,13 +5712,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.NvidiaGPUs = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5646,13 +5728,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.DockerExecHandlerName = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5662,13 +5744,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.PodCIDR = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5678,13 +5760,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ResolverConfig = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5694,13 +5776,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.CPUCFSQuota = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5710,13 +5792,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.Containerized = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5726,13 +5808,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.MaxOpenFiles = int64(r.DecodeInt(64)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5742,13 +5824,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ReconcileCIDR = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5758,13 +5840,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.RegisterSchedulable = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5774,13 +5856,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ContentType = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5790,13 +5872,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.KubeAPIQPS = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5806,13 +5888,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.KubeAPIBurst = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5822,13 +5904,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.SerializeImagePulls = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5838,13 +5920,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ExperimentalFlannelOverlay = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5852,24 +5934,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.OutOfDiskTransitionFrequency = pkg1_unversioned.Duration{} } else { - yyv692 := &x.OutOfDiskTransitionFrequency - yym693 := z.DecBinary() - _ = yym693 + yyv702 := &x.OutOfDiskTransitionFrequency + yym703 := z.DecBinary() + _ = yym703 if false { - } else if z.HasExtensions() && z.DecExt(yyv692) { - } else if !yym693 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv692) + } else if z.HasExtensions() && z.DecExt(yyv702) { + } else if !yym703 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv702) } else { - z.DecFallback(yyv692, false) + z.DecFallback(yyv702, false) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5879,13 +5961,13 @@ func (x 
*KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.NodeIP = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5893,21 +5975,21 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.NodeLabels = nil } else { - yyv695 := &x.NodeLabels - yym696 := z.DecBinary() - _ = yym696 + yyv705 := &x.NodeLabels + yym706 := z.DecBinary() + _ = yym706 if false { } else { - z.F.DecMapStringStringX(yyv695, false, d) + z.F.DecMapStringStringX(yyv705, false, d) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5917,13 +5999,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.NonMasqueradeCIDR = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5933,13 +6015,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EnableCustomMetrics = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5949,13 +6031,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EvictionHard = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5965,13 +6047,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EvictionSoft = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5981,13 +6063,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EvictionSoftGracePeriod = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -5995,24 +6077,24 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.EvictionPressureTransitionPeriod = pkg1_unversioned.Duration{} } else { - yyv702 := &x.EvictionPressureTransitionPeriod - yym703 := z.DecBinary() - _ = yym703 + yyv712 := &x.EvictionPressureTransitionPeriod + yym713 := z.DecBinary() + _ = yym713 if false { - } else if z.HasExtensions() && z.DecExt(yyv702) { - 
} else if !yym703 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv702) + } else if z.HasExtensions() && z.DecExt(yyv712) { + } else if !yym713 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv712) } else { - z.DecFallback(yyv702, false) + z.DecFallback(yyv712, false) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6022,13 +6104,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EvictionMaxPodGracePeriod = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6038,13 +6120,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EvictionMinimumReclaim = string(r.DecodeString()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6054,13 +6136,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.PodsPerCore = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6070,13 +6152,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.EnableControllerAttachDetach = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6084,22 +6166,22 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.SystemReserved = nil } else { - yyv708 := &x.SystemReserved - yym709 := z.DecBinary() - _ = yym709 + yyv718 := &x.SystemReserved + yym719 := z.DecBinary() + _ = yym719 if false { - } else if z.HasExtensions() && z.DecExt(yyv708) { + } else if z.HasExtensions() && z.DecExt(yyv718) { } else { - h.decconfig_ConfigurationMap((*pkg2_config.ConfigurationMap)(yyv708), d) + h.decconfig_ConfigurationMap((*pkg2_config.ConfigurationMap)(yyv718), d) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6107,22 +6189,22 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.KubeReserved = nil } else { - yyv710 := &x.KubeReserved - yym711 := z.DecBinary() - _ = yym711 + yyv720 := &x.KubeReserved + yym721 := z.DecBinary() + _ = yym721 if false { - } else if z.HasExtensions() && z.DecExt(yyv710) { + } else if z.HasExtensions() && z.DecExt(yyv720) { } else { - h.decconfig_ConfigurationMap((*pkg2_config.ConfigurationMap)(yyv710), d) + 
h.decconfig_ConfigurationMap((*pkg2_config.ConfigurationMap)(yyv720), d) } } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6132,13 +6214,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.ProtectKernelDefaults = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6148,13 +6230,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.MakeIPTablesUtilChains = bool(r.DecodeBool()) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6164,13 +6246,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.IPTablesMasqueradeBit = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6180,13 +6262,13 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco } else { x.IPTablesDropBit = int32(r.DecodeInt(32)) } - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6194,26 +6276,26 @@ func (x *KubeletConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Deco if r.TryDecodeAsNil() { x.AllowedUnsafeSysctls = nil } else { - yyv716 := &x.AllowedUnsafeSysctls - yym717 := z.DecBinary() - _ = yym717 + yyv726 := &x.AllowedUnsafeSysctls + yym727 := z.DecBinary() + _ = yym727 if false { } else { - z.F.DecSliceStringX(yyv716, false, d) + z.F.DecSliceStringX(yyv726, false, d) } } for { - yyj594++ - if yyhl594 { - yyb594 = yyj594 > l + yyj602++ + if yyhl602 { + yyb602 = yyj602 > l } else { - yyb594 = r.CheckBreak() + yyb602 = r.CheckBreak() } - if yyb594 { + if yyb602 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj594-1, "") + z.DecStructFieldNotFound(yyj602-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -6225,36 +6307,36 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym718 := z.EncBinary() - _ = yym718 + yym728 := z.EncBinary() + _ = yym728 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep719 := !z.EncBinary() - yy2arr719 := z.EncBasicHandle().StructToArray - var yyq719 [14]bool - _, _, _ = yysep719, yyq719, yy2arr719 - const yyr719 bool = false - yyq719[0] = x.Kind != "" - yyq719[1] = x.APIVersion != "" - var yynn719 int - if yyr719 || yy2arr719 { + yysep729 := !z.EncBinary() + yy2arr729 := z.EncBasicHandle().StructToArray + var yyq729 [14]bool + _, _, _ = yysep729, yyq729, yy2arr729 + const yyr729 bool = false + 
yyq729[0] = x.Kind != "" + yyq729[1] = x.APIVersion != "" + var yynn729 int + if yyr729 || yy2arr729 { r.EncodeArrayStart(14) } else { - yynn719 = 12 - for _, b := range yyq719 { + yynn729 = 12 + for _, b := range yyq729 { if b { - yynn719++ + yynn729++ } } - r.EncodeMapStart(yynn719) - yynn719 = 0 + r.EncodeMapStart(yynn729) + yynn729 = 0 } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq719[0] { - yym721 := z.EncBinary() - _ = yym721 + if yyq729[0] { + yym731 := z.EncBinary() + _ = yym731 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -6263,23 +6345,23 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq719[0] { + if yyq729[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym722 := z.EncBinary() - _ = yym722 + yym732 := z.EncBinary() + _ = yym732 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq719[1] { - yym724 := z.EncBinary() - _ = yym724 + if yyq729[1] { + yym734 := z.EncBinary() + _ = yym734 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -6288,22 +6370,22 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq719[1] { + if yyq729[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym725 := z.EncBinary() - _ = yym725 + yym735 := z.EncBinary() + _ = yym735 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym727 := z.EncBinary() - _ = yym727 + yym737 := z.EncBinary() + _ = yym737 if false { } else { r.EncodeInt(int64(x.Port)) @@ -6312,17 +6394,17 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("port")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym728 := z.EncBinary() - _ = yym728 + yym738 := z.EncBinary() + _ = yym738 if false { } else { r.EncodeInt(int64(x.Port)) } } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym730 := z.EncBinary() - _ = yym730 + yym740 := z.EncBinary() + _ = yym740 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Address)) @@ -6331,17 +6413,17 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("address")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym731 := z.EncBinary() - _ = yym731 + yym741 := z.EncBinary() + _ = yym741 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Address)) } } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym733 := z.EncBinary() - _ = yym733 + yym743 := z.EncBinary() + _ = yym743 if false { } else { 
r.EncodeString(codecSelferC_UTF81234, string(x.AlgorithmProvider)) @@ -6350,17 +6432,17 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("algorithmProvider")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym734 := z.EncBinary() - _ = yym734 + yym744 := z.EncBinary() + _ = yym744 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.AlgorithmProvider)) } } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym736 := z.EncBinary() - _ = yym736 + yym746 := z.EncBinary() + _ = yym746 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile)) @@ -6369,17 +6451,17 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("policyConfigFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym737 := z.EncBinary() - _ = yym737 + yym747 := z.EncBinary() + _ = yym747 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PolicyConfigFile)) } } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym739 := z.EncBinary() - _ = yym739 + yym749 := z.EncBinary() + _ = yym749 if false { } else { r.EncodeBool(bool(x.EnableProfiling)) @@ -6388,17 +6470,17 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("enableProfiling")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym740 := z.EncBinary() - _ = yym740 + yym750 := z.EncBinary() + _ = yym750 if false { } else { r.EncodeBool(bool(x.EnableProfiling)) } } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym742 := z.EncBinary() - _ = yym742 + yym752 := z.EncBinary() + _ = yym752 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) @@ -6407,17 +6489,17 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("contentType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym743 := z.EncBinary() - _ = yym743 + yym753 := z.EncBinary() + _ = yym753 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) } } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym745 := z.EncBinary() - _ = yym745 + yym755 := z.EncBinary() + _ = yym755 if false { } else { r.EncodeFloat32(float32(x.KubeAPIQPS)) @@ -6426,17 +6508,17 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym746 := z.EncBinary() - _ = yym746 + yym756 := z.EncBinary() + _ = yym756 if false { } else { r.EncodeFloat32(float32(x.KubeAPIQPS)) } } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym748 := z.EncBinary() - _ = yym748 + yym758 := z.EncBinary() + _ = yym758 if false { } else { r.EncodeInt(int64(x.KubeAPIBurst)) @@ 
-6445,17 +6527,17 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym749 := z.EncBinary() - _ = yym749 + yym759 := z.EncBinary() + _ = yym759 if false { } else { r.EncodeInt(int64(x.KubeAPIBurst)) } } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym751 := z.EncBinary() - _ = yym751 + yym761 := z.EncBinary() + _ = yym761 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) @@ -6464,17 +6546,17 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("schedulerName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym752 := z.EncBinary() - _ = yym752 + yym762 := z.EncBinary() + _ = yym762 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.SchedulerName)) } } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym754 := z.EncBinary() - _ = yym754 + yym764 := z.EncBinary() + _ = yym764 if false { } else { r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight)) @@ -6483,17 +6565,17 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("hardPodAffinitySymmetricWeight")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym755 := z.EncBinary() - _ = yym755 + yym765 := z.EncBinary() + _ = yym765 if false { } else { r.EncodeInt(int64(x.HardPodAffinitySymmetricWeight)) } } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym757 := z.EncBinary() - _ = yym757 + yym767 := z.EncBinary() + _ = yym767 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains)) @@ -6502,25 +6584,25 @@ func (x *KubeSchedulerConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("failureDomains")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym758 := z.EncBinary() - _ = yym758 + yym768 := z.EncBinary() + _ = yym768 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FailureDomains)) } } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy760 := &x.LeaderElection - yy760.CodecEncodeSelf(e) + yy770 := &x.LeaderElection + yy770.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("leaderElection")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy761 := &x.LeaderElection - yy761.CodecEncodeSelf(e) + yy771 := &x.LeaderElection + yy771.CodecEncodeSelf(e) } - if yyr719 || yy2arr719 { + if yyr729 || yy2arr729 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -6533,25 +6615,25 @@ func (x *KubeSchedulerConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym762 := z.DecBinary() - _ = yym762 + yym772 := z.DecBinary() + _ = yym772 if false { } 
else if z.HasExtensions() && z.DecExt(x) { } else { - yyct763 := r.ContainerType() - if yyct763 == codecSelferValueTypeMap1234 { - yyl763 := r.ReadMapStart() - if yyl763 == 0 { + yyct773 := r.ContainerType() + if yyct773 == codecSelferValueTypeMap1234 { + yyl773 := r.ReadMapStart() + if yyl773 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl763, d) + x.codecDecodeSelfFromMap(yyl773, d) } - } else if yyct763 == codecSelferValueTypeArray1234 { - yyl763 := r.ReadArrayStart() - if yyl763 == 0 { + } else if yyct773 == codecSelferValueTypeArray1234 { + yyl773 := r.ReadArrayStart() + if yyl773 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl763, d) + x.codecDecodeSelfFromArray(yyl773, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -6563,12 +6645,12 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978. var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys764Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys764Slc - var yyhl764 bool = l >= 0 - for yyj764 := 0; ; yyj764++ { - if yyhl764 { - if yyj764 >= l { + var yys774Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys774Slc + var yyhl774 bool = l >= 0 + for yyj774 := 0; ; yyj774++ { + if yyhl774 { + if yyj774 >= l { break } } else { @@ -6577,10 +6659,10 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978. } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys764Slc = r.DecodeBytes(yys764Slc, true, true) - yys764 := string(yys764Slc) + yys774Slc = r.DecodeBytes(yys774Slc, true, true) + yys774 := string(yys774Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys764 { + switch yys774 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" @@ -6663,13 +6745,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromMap(l int, d *codec1978. 
if r.TryDecodeAsNil() { x.LeaderElection = LeaderElectionConfiguration{} } else { - yyv778 := &x.LeaderElection - yyv778.CodecDecodeSelf(d) + yyv788 := &x.LeaderElection + yyv788.CodecDecodeSelf(d) } default: - z.DecStructFieldNotFound(-1, yys764) - } // end switch yys764 - } // end for yyj764 + z.DecStructFieldNotFound(-1, yys774) + } // end switch yys774 + } // end for yyj774 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -6677,16 +6759,16 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj779 int - var yyb779 bool - var yyhl779 bool = l >= 0 - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + var yyj789 int + var yyb789 bool + var yyhl789 bool = l >= 0 + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6696,13 +6778,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.Kind = string(r.DecodeString()) } - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6712,13 +6794,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.APIVersion = string(r.DecodeString()) } - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6728,13 +6810,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.Port = int32(r.DecodeInt(32)) } - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6744,13 +6826,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.Address = string(r.DecodeString()) } - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6760,13 +6842,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.AlgorithmProvider = string(r.DecodeString()) } - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6776,13 +6858,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.PolicyConfigFile = string(r.DecodeString()) } - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6792,13 +6874,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l 
int, d *codec197 } else { x.EnableProfiling = bool(r.DecodeBool()) } - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6808,13 +6890,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.ContentType = string(r.DecodeString()) } - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6824,13 +6906,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.KubeAPIQPS = float32(r.DecodeFloat(true)) } - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6840,13 +6922,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.KubeAPIBurst = int32(r.DecodeInt(32)) } - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6856,13 +6938,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.SchedulerName = string(r.DecodeString()) } - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6872,13 +6954,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.HardPodAffinitySymmetricWeight = int(r.DecodeInt(codecSelferBitsize1234)) } - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6888,13 +6970,13 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 } else { x.FailureDomains = string(r.DecodeString()) } - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -6902,21 +6984,21 @@ func (x *KubeSchedulerConfiguration) codecDecodeSelfFromArray(l int, d *codec197 if r.TryDecodeAsNil() { x.LeaderElection = LeaderElectionConfiguration{} } else { - yyv793 := &x.LeaderElection - yyv793.CodecDecodeSelf(d) + yyv803 := &x.LeaderElection + yyv803.CodecDecodeSelf(d) } for { - yyj779++ - if yyhl779 { - yyb779 = yyj779 > l + yyj789++ + if yyhl789 { + yyb789 = yyj789 > l } else { - yyb779 = r.CheckBreak() + yyb789 = r.CheckBreak() } - if yyb779 { + if yyb789 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj779-1, "") + z.DecStructFieldNotFound(yyj789-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ 
-6928,33 +7010,33 @@ func (x *LeaderElectionConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym794 := z.EncBinary() - _ = yym794 + yym804 := z.EncBinary() + _ = yym804 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep795 := !z.EncBinary() - yy2arr795 := z.EncBasicHandle().StructToArray - var yyq795 [4]bool - _, _, _ = yysep795, yyq795, yy2arr795 - const yyr795 bool = false - var yynn795 int - if yyr795 || yy2arr795 { + yysep805 := !z.EncBinary() + yy2arr805 := z.EncBasicHandle().StructToArray + var yyq805 [4]bool + _, _, _ = yysep805, yyq805, yy2arr805 + const yyr805 bool = false + var yynn805 int + if yyr805 || yy2arr805 { r.EncodeArrayStart(4) } else { - yynn795 = 4 - for _, b := range yyq795 { + yynn805 = 4 + for _, b := range yyq805 { if b { - yynn795++ + yynn805++ } } - r.EncodeMapStart(yynn795) - yynn795 = 0 + r.EncodeMapStart(yynn805) + yynn805 = 0 } - if yyr795 || yy2arr795 { + if yyr805 || yy2arr805 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym797 := z.EncBinary() - _ = yym797 + yym807 := z.EncBinary() + _ = yym807 if false { } else { r.EncodeBool(bool(x.LeaderElect)) @@ -6963,95 +7045,95 @@ func (x *LeaderElectionConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("leaderElect")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym798 := z.EncBinary() - _ = yym798 + yym808 := z.EncBinary() + _ = yym808 if false { } else { r.EncodeBool(bool(x.LeaderElect)) } } - if yyr795 || yy2arr795 { + if yyr805 || yy2arr805 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy800 := &x.LeaseDuration - yym801 := z.EncBinary() - _ = yym801 + yy810 := &x.LeaseDuration + yym811 := z.EncBinary() + _ = yym811 if false { - } else if z.HasExtensions() && z.EncExt(yy800) { - } else if !yym801 && z.IsJSONHandle() { - z.EncJSONMarshal(yy800) + } else if z.HasExtensions() && z.EncExt(yy810) { + } else if !yym811 && z.IsJSONHandle() { + z.EncJSONMarshal(yy810) } else { - z.EncFallback(yy800) + z.EncFallback(yy810) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("leaseDuration")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy802 := &x.LeaseDuration - yym803 := z.EncBinary() - _ = yym803 + yy812 := &x.LeaseDuration + yym813 := z.EncBinary() + _ = yym813 if false { - } else if z.HasExtensions() && z.EncExt(yy802) { - } else if !yym803 && z.IsJSONHandle() { - z.EncJSONMarshal(yy802) + } else if z.HasExtensions() && z.EncExt(yy812) { + } else if !yym813 && z.IsJSONHandle() { + z.EncJSONMarshal(yy812) } else { - z.EncFallback(yy802) + z.EncFallback(yy812) } } - if yyr795 || yy2arr795 { + if yyr805 || yy2arr805 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy805 := &x.RenewDeadline - yym806 := z.EncBinary() - _ = yym806 + yy815 := &x.RenewDeadline + yym816 := z.EncBinary() + _ = yym816 if false { - } else if z.HasExtensions() && z.EncExt(yy805) { - } else if !yym806 && z.IsJSONHandle() { - z.EncJSONMarshal(yy805) + } else if z.HasExtensions() && z.EncExt(yy815) { + } else if !yym816 && z.IsJSONHandle() { + z.EncJSONMarshal(yy815) } else { - z.EncFallback(yy805) + z.EncFallback(yy815) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("renewDeadline")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy807 := &x.RenewDeadline 
- yym808 := z.EncBinary() - _ = yym808 + yy817 := &x.RenewDeadline + yym818 := z.EncBinary() + _ = yym818 if false { - } else if z.HasExtensions() && z.EncExt(yy807) { - } else if !yym808 && z.IsJSONHandle() { - z.EncJSONMarshal(yy807) + } else if z.HasExtensions() && z.EncExt(yy817) { + } else if !yym818 && z.IsJSONHandle() { + z.EncJSONMarshal(yy817) } else { - z.EncFallback(yy807) + z.EncFallback(yy817) } } - if yyr795 || yy2arr795 { + if yyr805 || yy2arr805 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy810 := &x.RetryPeriod - yym811 := z.EncBinary() - _ = yym811 + yy820 := &x.RetryPeriod + yym821 := z.EncBinary() + _ = yym821 if false { - } else if z.HasExtensions() && z.EncExt(yy810) { - } else if !yym811 && z.IsJSONHandle() { - z.EncJSONMarshal(yy810) + } else if z.HasExtensions() && z.EncExt(yy820) { + } else if !yym821 && z.IsJSONHandle() { + z.EncJSONMarshal(yy820) } else { - z.EncFallback(yy810) + z.EncFallback(yy820) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("retryPeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy812 := &x.RetryPeriod - yym813 := z.EncBinary() - _ = yym813 + yy822 := &x.RetryPeriod + yym823 := z.EncBinary() + _ = yym823 if false { - } else if z.HasExtensions() && z.EncExt(yy812) { - } else if !yym813 && z.IsJSONHandle() { - z.EncJSONMarshal(yy812) + } else if z.HasExtensions() && z.EncExt(yy822) { + } else if !yym823 && z.IsJSONHandle() { + z.EncJSONMarshal(yy822) } else { - z.EncFallback(yy812) + z.EncFallback(yy822) } } - if yyr795 || yy2arr795 { + if yyr805 || yy2arr805 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -7064,25 +7146,25 @@ func (x *LeaderElectionConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym814 := z.DecBinary() - _ = yym814 + yym824 := z.DecBinary() + _ = yym824 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct815 := r.ContainerType() - if yyct815 == codecSelferValueTypeMap1234 { - yyl815 := r.ReadMapStart() - if yyl815 == 0 { + yyct825 := r.ContainerType() + if yyct825 == codecSelferValueTypeMap1234 { + yyl825 := r.ReadMapStart() + if yyl825 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl815, d) + x.codecDecodeSelfFromMap(yyl825, d) } - } else if yyct815 == codecSelferValueTypeArray1234 { - yyl815 := r.ReadArrayStart() - if yyl815 == 0 { + } else if yyct825 == codecSelferValueTypeArray1234 { + yyl825 := r.ReadArrayStart() + if yyl825 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl815, d) + x.codecDecodeSelfFromArray(yyl825, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -7094,12 +7176,12 @@ func (x *LeaderElectionConfiguration) codecDecodeSelfFromMap(l int, d *codec1978 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys816Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys816Slc - var yyhl816 bool = l >= 0 - for yyj816 := 0; ; yyj816++ { - if yyhl816 { - if yyj816 >= l { + var yys826Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys826Slc + var yyhl826 bool = l >= 0 + for yyj826 := 0; ; yyj826++ { + if yyhl826 { + if yyj826 >= l { break } } else { @@ -7108,10 +7190,10 @@ func (x *LeaderElectionConfiguration) 
codecDecodeSelfFromMap(l int, d *codec1978 } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys816Slc = r.DecodeBytes(yys816Slc, true, true) - yys816 := string(yys816Slc) + yys826Slc = r.DecodeBytes(yys826Slc, true, true) + yys826 := string(yys826Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys816 { + switch yys826 { case "leaderElect": if r.TryDecodeAsNil() { x.LeaderElect = false @@ -7122,51 +7204,51 @@ func (x *LeaderElectionConfiguration) codecDecodeSelfFromMap(l int, d *codec1978 if r.TryDecodeAsNil() { x.LeaseDuration = pkg1_unversioned.Duration{} } else { - yyv818 := &x.LeaseDuration - yym819 := z.DecBinary() - _ = yym819 + yyv828 := &x.LeaseDuration + yym829 := z.DecBinary() + _ = yym829 if false { - } else if z.HasExtensions() && z.DecExt(yyv818) { - } else if !yym819 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv818) + } else if z.HasExtensions() && z.DecExt(yyv828) { + } else if !yym829 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv828) } else { - z.DecFallback(yyv818, false) + z.DecFallback(yyv828, false) } } case "renewDeadline": if r.TryDecodeAsNil() { x.RenewDeadline = pkg1_unversioned.Duration{} } else { - yyv820 := &x.RenewDeadline - yym821 := z.DecBinary() - _ = yym821 + yyv830 := &x.RenewDeadline + yym831 := z.DecBinary() + _ = yym831 if false { - } else if z.HasExtensions() && z.DecExt(yyv820) { - } else if !yym821 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv820) + } else if z.HasExtensions() && z.DecExt(yyv830) { + } else if !yym831 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv830) } else { - z.DecFallback(yyv820, false) + z.DecFallback(yyv830, false) } } case "retryPeriod": if r.TryDecodeAsNil() { x.RetryPeriod = pkg1_unversioned.Duration{} } else { - yyv822 := &x.RetryPeriod - yym823 := z.DecBinary() - _ = yym823 + yyv832 := &x.RetryPeriod + yym833 := z.DecBinary() + _ = yym833 if false { - } else if z.HasExtensions() && z.DecExt(yyv822) { - } else if !yym823 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv822) + } else if z.HasExtensions() && z.DecExt(yyv832) { + } else if !yym833 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv832) } else { - z.DecFallback(yyv822, false) + z.DecFallback(yyv832, false) } } default: - z.DecStructFieldNotFound(-1, yys816) - } // end switch yys816 - } // end for yyj816 + z.DecStructFieldNotFound(-1, yys826) + } // end switch yys826 + } // end for yyj826 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -7174,16 +7256,16 @@ func (x *LeaderElectionConfiguration) codecDecodeSelfFromArray(l int, d *codec19 var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj824 int - var yyb824 bool - var yyhl824 bool = l >= 0 - yyj824++ - if yyhl824 { - yyb824 = yyj824 > l + var yyj834 int + var yyb834 bool + var yyhl834 bool = l >= 0 + yyj834++ + if yyhl834 { + yyb834 = yyj834 > l } else { - yyb824 = r.CheckBreak() + yyb834 = r.CheckBreak() } - if yyb824 { + if yyb834 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7193,13 +7275,13 @@ func (x *LeaderElectionConfiguration) codecDecodeSelfFromArray(l int, d *codec19 } else { x.LeaderElect = bool(r.DecodeBool()) } - yyj824++ - if yyhl824 { - yyb824 = yyj824 > l + yyj834++ + if yyhl834 { + yyb834 = yyj834 > l } else { - yyb824 = r.CheckBreak() + yyb834 = r.CheckBreak() } - if yyb824 { + if yyb834 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7207,24 +7289,24 @@ func (x *LeaderElectionConfiguration) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { 
x.LeaseDuration = pkg1_unversioned.Duration{} } else { - yyv826 := &x.LeaseDuration - yym827 := z.DecBinary() - _ = yym827 + yyv836 := &x.LeaseDuration + yym837 := z.DecBinary() + _ = yym837 if false { - } else if z.HasExtensions() && z.DecExt(yyv826) { - } else if !yym827 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv826) + } else if z.HasExtensions() && z.DecExt(yyv836) { + } else if !yym837 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv836) } else { - z.DecFallback(yyv826, false) + z.DecFallback(yyv836, false) } } - yyj824++ - if yyhl824 { - yyb824 = yyj824 > l + yyj834++ + if yyhl834 { + yyb834 = yyj834 > l } else { - yyb824 = r.CheckBreak() + yyb834 = r.CheckBreak() } - if yyb824 { + if yyb834 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7232,24 +7314,24 @@ func (x *LeaderElectionConfiguration) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.RenewDeadline = pkg1_unversioned.Duration{} } else { - yyv828 := &x.RenewDeadline - yym829 := z.DecBinary() - _ = yym829 + yyv838 := &x.RenewDeadline + yym839 := z.DecBinary() + _ = yym839 if false { - } else if z.HasExtensions() && z.DecExt(yyv828) { - } else if !yym829 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv828) + } else if z.HasExtensions() && z.DecExt(yyv838) { + } else if !yym839 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv838) } else { - z.DecFallback(yyv828, false) + z.DecFallback(yyv838, false) } } - yyj824++ - if yyhl824 { - yyb824 = yyj824 > l + yyj834++ + if yyhl834 { + yyb834 = yyj834 > l } else { - yyb824 = r.CheckBreak() + yyb834 = r.CheckBreak() } - if yyb824 { + if yyb834 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -7257,29 +7339,29 @@ func (x *LeaderElectionConfiguration) codecDecodeSelfFromArray(l int, d *codec19 if r.TryDecodeAsNil() { x.RetryPeriod = pkg1_unversioned.Duration{} } else { - yyv830 := &x.RetryPeriod - yym831 := z.DecBinary() - _ = yym831 + yyv840 := &x.RetryPeriod + yym841 := z.DecBinary() + _ = yym841 if false { - } else if z.HasExtensions() && z.DecExt(yyv830) { - } else if !yym831 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv830) + } else if z.HasExtensions() && z.DecExt(yyv840) { + } else if !yym841 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv840) } else { - z.DecFallback(yyv830, false) + z.DecFallback(yyv840, false) } } for { - yyj824++ - if yyhl824 { - yyb824 = yyj824 > l + yyj834++ + if yyhl834 { + yyb834 = yyj834 > l } else { - yyb824 = r.CheckBreak() + yyb834 = r.CheckBreak() } - if yyb824 { + if yyb834 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj824-1, "") + z.DecStructFieldNotFound(yyj834-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -7291,36 +7373,36 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode if x == nil { r.EncodeNil() } else { - yym832 := z.EncBinary() - _ = yym832 + yym842 := z.EncBinary() + _ = yym842 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep833 := !z.EncBinary() - yy2arr833 := z.EncBasicHandle().StructToArray - var yyq833 [59]bool - _, _, _ = yysep833, yyq833, yy2arr833 - const yyr833 bool = false - yyq833[0] = x.Kind != "" - yyq833[1] = x.APIVersion != "" - var yynn833 int - if yyr833 || yy2arr833 { + yysep843 := !z.EncBinary() + yy2arr843 := z.EncBasicHandle().StructToArray + var yyq843 [59]bool + _, _, _ = yysep843, yyq843, yy2arr843 + const yyr843 bool = false + yyq843[0] = x.Kind != "" + yyq843[1] = x.APIVersion != "" + var yynn843 int + if yyr843 || 
yy2arr843 { r.EncodeArrayStart(59) } else { - yynn833 = 57 - for _, b := range yyq833 { + yynn843 = 57 + for _, b := range yyq843 { if b { - yynn833++ + yynn843++ } } - r.EncodeMapStart(yynn833) - yynn833 = 0 + r.EncodeMapStart(yynn843) + yynn843 = 0 } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq833[0] { - yym835 := z.EncBinary() - _ = yym835 + if yyq843[0] { + yym845 := z.EncBinary() + _ = yym845 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) @@ -7329,23 +7411,23 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq833[0] { + if yyq843[0] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kind")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym836 := z.EncBinary() - _ = yym836 + yym846 := z.EncBinary() + _ = yym846 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) } } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq833[1] { - yym838 := z.EncBinary() - _ = yym838 + if yyq843[1] { + yym848 := z.EncBinary() + _ = yym848 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) @@ -7354,22 +7436,22 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode r.EncodeString(codecSelferC_UTF81234, "") } } else { - if yyq833[1] { + if yyq843[1] { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym839 := z.EncBinary() - _ = yym839 + yym849 := z.EncBinary() + _ = yym849 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) } } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym841 := z.EncBinary() - _ = yym841 + yym851 := z.EncBinary() + _ = yym851 if false { } else { r.EncodeInt(int64(x.Port)) @@ -7378,17 +7460,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("port")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym842 := z.EncBinary() - _ = yym842 + yym852 := z.EncBinary() + _ = yym852 if false { } else { r.EncodeInt(int64(x.Port)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym844 := z.EncBinary() - _ = yym844 + yym854 := z.EncBinary() + _ = yym854 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Address)) @@ -7397,17 +7479,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("address")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym845 := z.EncBinary() - _ = yym845 + yym855 := z.EncBinary() + _ = yym855 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.Address)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym847 := z.EncBinary() - _ = yym847 + yym857 := z.EncBinary() + _ = yym857 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) @@ -7416,17 
+7498,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cloudProvider")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym848 := z.EncBinary() - _ = yym848 + yym858 := z.EncBinary() + _ = yym858 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CloudProvider)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym850 := z.EncBinary() - _ = yym850 + yym860 := z.EncBinary() + _ = yym860 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) @@ -7435,17 +7517,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("cloudConfigFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym851 := z.EncBinary() - _ = yym851 + yym861 := z.EncBinary() + _ = yym861 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.CloudConfigFile)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym853 := z.EncBinary() - _ = yym853 + yym863 := z.EncBinary() + _ = yym863 if false { } else { r.EncodeInt(int64(x.ConcurrentEndpointSyncs)) @@ -7454,17 +7536,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("concurrentEndpointSyncs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym854 := z.EncBinary() - _ = yym854 + yym864 := z.EncBinary() + _ = yym864 if false { } else { r.EncodeInt(int64(x.ConcurrentEndpointSyncs)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym856 := z.EncBinary() - _ = yym856 + yym866 := z.EncBinary() + _ = yym866 if false { } else { r.EncodeInt(int64(x.ConcurrentRSSyncs)) @@ -7473,17 +7555,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("concurrentRSSyncs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym857 := z.EncBinary() - _ = yym857 + yym867 := z.EncBinary() + _ = yym867 if false { } else { r.EncodeInt(int64(x.ConcurrentRSSyncs)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym859 := z.EncBinary() - _ = yym859 + yym869 := z.EncBinary() + _ = yym869 if false { } else { r.EncodeInt(int64(x.ConcurrentRCSyncs)) @@ -7492,17 +7574,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("concurrentRCSyncs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym860 := z.EncBinary() - _ = yym860 + yym870 := z.EncBinary() + _ = yym870 if false { } else { r.EncodeInt(int64(x.ConcurrentRCSyncs)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym862 := z.EncBinary() - _ = yym862 + yym872 := z.EncBinary() + _ = yym872 if false { } else { r.EncodeInt(int64(x.ConcurrentServiceSyncs)) @@ -7511,17 +7593,17 @@ func (x 
*KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("concurrentServiceSyncs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym863 := z.EncBinary() - _ = yym863 + yym873 := z.EncBinary() + _ = yym873 if false { } else { r.EncodeInt(int64(x.ConcurrentServiceSyncs)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym865 := z.EncBinary() - _ = yym865 + yym875 := z.EncBinary() + _ = yym875 if false { } else { r.EncodeInt(int64(x.ConcurrentResourceQuotaSyncs)) @@ -7530,17 +7612,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("concurrentResourceQuotaSyncs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym866 := z.EncBinary() - _ = yym866 + yym876 := z.EncBinary() + _ = yym876 if false { } else { r.EncodeInt(int64(x.ConcurrentResourceQuotaSyncs)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym868 := z.EncBinary() - _ = yym868 + yym878 := z.EncBinary() + _ = yym878 if false { } else { r.EncodeInt(int64(x.ConcurrentDeploymentSyncs)) @@ -7549,17 +7631,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("concurrentDeploymentSyncs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym869 := z.EncBinary() - _ = yym869 + yym879 := z.EncBinary() + _ = yym879 if false { } else { r.EncodeInt(int64(x.ConcurrentDeploymentSyncs)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym871 := z.EncBinary() - _ = yym871 + yym881 := z.EncBinary() + _ = yym881 if false { } else { r.EncodeInt(int64(x.ConcurrentDaemonSetSyncs)) @@ -7568,17 +7650,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("concurrentDaemonSetSyncs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym872 := z.EncBinary() - _ = yym872 + yym882 := z.EncBinary() + _ = yym882 if false { } else { r.EncodeInt(int64(x.ConcurrentDaemonSetSyncs)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym874 := z.EncBinary() - _ = yym874 + yym884 := z.EncBinary() + _ = yym884 if false { } else { r.EncodeInt(int64(x.ConcurrentJobSyncs)) @@ -7587,17 +7669,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("concurrentJobSyncs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym875 := z.EncBinary() - _ = yym875 + yym885 := z.EncBinary() + _ = yym885 if false { } else { r.EncodeInt(int64(x.ConcurrentJobSyncs)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym877 := z.EncBinary() - _ = yym877 + yym887 := z.EncBinary() + _ = yym887 if false { } else { r.EncodeInt(int64(x.ConcurrentNamespaceSyncs)) @@ -7606,17 +7688,17 @@ func (x *KubeControllerManagerConfiguration) 
CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("concurrentNamespaceSyncs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym878 := z.EncBinary() - _ = yym878 + yym888 := z.EncBinary() + _ = yym888 if false { } else { r.EncodeInt(int64(x.ConcurrentNamespaceSyncs)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym880 := z.EncBinary() - _ = yym880 + yym890 := z.EncBinary() + _ = yym890 if false { } else { r.EncodeInt(int64(x.ConcurrentSATokenSyncs)) @@ -7625,17 +7707,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("concurrentSATokenSyncs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym881 := z.EncBinary() - _ = yym881 + yym891 := z.EncBinary() + _ = yym891 if false { } else { r.EncodeInt(int64(x.ConcurrentSATokenSyncs)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym883 := z.EncBinary() - _ = yym883 + yym893 := z.EncBinary() + _ = yym893 if false { } else { r.EncodeInt(int64(x.LookupCacheSizeForRC)) @@ -7644,17 +7726,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lookupCacheSizeForRC")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym884 := z.EncBinary() - _ = yym884 + yym894 := z.EncBinary() + _ = yym894 if false { } else { r.EncodeInt(int64(x.LookupCacheSizeForRC)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym886 := z.EncBinary() - _ = yym886 + yym896 := z.EncBinary() + _ = yym896 if false { } else { r.EncodeInt(int64(x.LookupCacheSizeForRS)) @@ -7663,17 +7745,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lookupCacheSizeForRS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym887 := z.EncBinary() - _ = yym887 + yym897 := z.EncBinary() + _ = yym897 if false { } else { r.EncodeInt(int64(x.LookupCacheSizeForRS)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym889 := z.EncBinary() - _ = yym889 + yym899 := z.EncBinary() + _ = yym899 if false { } else { r.EncodeInt(int64(x.LookupCacheSizeForDaemonSet)) @@ -7682,70 +7764,16 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("lookupCacheSizeForDaemonSet")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym890 := z.EncBinary() - _ = yym890 - if false { - } else { - r.EncodeInt(int64(x.LookupCacheSizeForDaemonSet)) - } - } - if yyr833 || yy2arr833 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy892 := &x.ServiceSyncPeriod - yym893 := z.EncBinary() - _ = yym893 - if false { - } else if z.HasExtensions() && z.EncExt(yy892) { - } else if !yym893 && z.IsJSONHandle() { - z.EncJSONMarshal(yy892) - } else { - z.EncFallback(yy892) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - 
r.EncodeString(codecSelferC_UTF81234, string("serviceSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy894 := &x.ServiceSyncPeriod - yym895 := z.EncBinary() - _ = yym895 - if false { - } else if z.HasExtensions() && z.EncExt(yy894) { - } else if !yym895 && z.IsJSONHandle() { - z.EncJSONMarshal(yy894) - } else { - z.EncFallback(yy894) - } - } - if yyr833 || yy2arr833 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy897 := &x.NodeSyncPeriod - yym898 := z.EncBinary() - _ = yym898 - if false { - } else if z.HasExtensions() && z.EncExt(yy897) { - } else if !yym898 && z.IsJSONHandle() { - z.EncJSONMarshal(yy897) - } else { - z.EncFallback(yy897) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("nodeSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy899 := &x.NodeSyncPeriod yym900 := z.EncBinary() _ = yym900 if false { - } else if z.HasExtensions() && z.EncExt(yy899) { - } else if !yym900 && z.IsJSONHandle() { - z.EncJSONMarshal(yy899) } else { - z.EncFallback(yy899) + r.EncodeInt(int64(x.LookupCacheSizeForDaemonSet)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy902 := &x.ResourceQuotaSyncPeriod + yy902 := &x.ServiceSyncPeriod yym903 := z.EncBinary() _ = yym903 if false { @@ -7757,9 +7785,9 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("resourceQuotaSyncPeriod")) + r.EncodeString(codecSelferC_UTF81234, string("serviceSyncPeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy904 := &x.ResourceQuotaSyncPeriod + yy904 := &x.ServiceSyncPeriod yym905 := z.EncBinary() _ = yym905 if false { @@ -7770,9 +7798,9 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncFallback(yy904) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy907 := &x.NamespaceSyncPeriod + yy907 := &x.NodeSyncPeriod yym908 := z.EncBinary() _ = yym908 if false { @@ -7784,9 +7812,9 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("namespaceSyncPeriod")) + r.EncodeString(codecSelferC_UTF81234, string("nodeSyncPeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy909 := &x.NamespaceSyncPeriod + yy909 := &x.NodeSyncPeriod yym910 := z.EncBinary() _ = yym910 if false { @@ -7797,9 +7825,9 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncFallback(yy909) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy912 := &x.PVClaimBinderSyncPeriod + yy912 := &x.ResourceQuotaSyncPeriod yym913 := z.EncBinary() _ = yym913 if false { @@ -7811,9 +7839,9 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("pvClaimBinderSyncPeriod")) + r.EncodeString(codecSelferC_UTF81234, string("resourceQuotaSyncPeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy914 := &x.PVClaimBinderSyncPeriod + yy914 := &x.ResourceQuotaSyncPeriod yym915 
:= z.EncBinary() _ = yym915 if false { @@ -7824,9 +7852,9 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncFallback(yy914) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy917 := &x.MinResyncPeriod + yy917 := &x.NamespaceSyncPeriod yym918 := z.EncBinary() _ = yym918 if false { @@ -7838,9 +7866,9 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minResyncPeriod")) + r.EncodeString(codecSelferC_UTF81234, string("namespaceSyncPeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy919 := &x.MinResyncPeriod + yy919 := &x.NamespaceSyncPeriod yym920 := z.EncBinary() _ = yym920 if false { @@ -7851,42 +7879,36 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncFallback(yy919) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym922 := z.EncBinary() - _ = yym922 + yy922 := &x.PVClaimBinderSyncPeriod + yym923 := z.EncBinary() + _ = yym923 if false { + } else if z.HasExtensions() && z.EncExt(yy922) { + } else if !yym923 && z.IsJSONHandle() { + z.EncJSONMarshal(yy922) } else { - r.EncodeInt(int64(x.TerminatedPodGCThreshold)) + z.EncFallback(yy922) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("terminatedPodGCThreshold")) + r.EncodeString(codecSelferC_UTF81234, string("pvClaimBinderSyncPeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym923 := z.EncBinary() - _ = yym923 + yy924 := &x.PVClaimBinderSyncPeriod + yym925 := z.EncBinary() + _ = yym925 if false { + } else if z.HasExtensions() && z.EncExt(yy924) { + } else if !yym925 && z.IsJSONHandle() { + z.EncJSONMarshal(yy924) } else { - r.EncodeInt(int64(x.TerminatedPodGCThreshold)) + z.EncFallback(yy924) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy925 := &x.HorizontalPodAutoscalerSyncPeriod - yym926 := z.EncBinary() - _ = yym926 - if false { - } else if z.HasExtensions() && z.EncExt(yy925) { - } else if !yym926 && z.IsJSONHandle() { - z.EncJSONMarshal(yy925) - } else { - z.EncFallback(yy925) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("horizontalPodAutoscalerSyncPeriod")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy927 := &x.HorizontalPodAutoscalerSyncPeriod + yy927 := &x.MinResyncPeriod yym928 := z.EncBinary() _ = yym928 if false { @@ -7896,37 +7918,43 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode } else { z.EncFallback(yy927) } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("minResyncPeriod")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy929 := &x.MinResyncPeriod + yym930 := z.EncBinary() + _ = yym930 + if false { + } else if z.HasExtensions() && z.EncExt(yy929) { + } else if !yym930 && z.IsJSONHandle() { + z.EncJSONMarshal(yy929) + } else { + z.EncFallback(yy929) + } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy930 := &x.DeploymentControllerSyncPeriod - yym931 := z.EncBinary() - _ = yym931 + yym932 := 
z.EncBinary() + _ = yym932 if false { - } else if z.HasExtensions() && z.EncExt(yy930) { - } else if !yym931 && z.IsJSONHandle() { - z.EncJSONMarshal(yy930) } else { - z.EncFallback(yy930) + r.EncodeInt(int64(x.TerminatedPodGCThreshold)) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("deploymentControllerSyncPeriod")) + r.EncodeString(codecSelferC_UTF81234, string("terminatedPodGCThreshold")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy932 := &x.DeploymentControllerSyncPeriod yym933 := z.EncBinary() _ = yym933 if false { - } else if z.HasExtensions() && z.EncExt(yy932) { - } else if !yym933 && z.IsJSONHandle() { - z.EncJSONMarshal(yy932) } else { - z.EncFallback(yy932) + r.EncodeInt(int64(x.TerminatedPodGCThreshold)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy935 := &x.PodEvictionTimeout + yy935 := &x.HorizontalPodAutoscalerSyncPeriod yym936 := z.EncBinary() _ = yym936 if false { @@ -7938,9 +7966,9 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("podEvictionTimeout")) + r.EncodeString(codecSelferC_UTF81234, string("horizontalPodAutoscalerSyncPeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy937 := &x.PodEvictionTimeout + yy937 := &x.HorizontalPodAutoscalerSyncPeriod yym938 := z.EncBinary() _ = yym938 if false { @@ -7951,10 +7979,64 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncFallback(yy937) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy940 := &x.DeploymentControllerSyncPeriod + yym941 := z.EncBinary() + _ = yym941 + if false { + } else if z.HasExtensions() && z.EncExt(yy940) { + } else if !yym941 && z.IsJSONHandle() { + z.EncJSONMarshal(yy940) + } else { + z.EncFallback(yy940) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("deploymentControllerSyncPeriod")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy942 := &x.DeploymentControllerSyncPeriod + yym943 := z.EncBinary() + _ = yym943 + if false { + } else if z.HasExtensions() && z.EncExt(yy942) { + } else if !yym943 && z.IsJSONHandle() { + z.EncJSONMarshal(yy942) + } else { + z.EncFallback(yy942) + } + } + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym940 := z.EncBinary() - _ = yym940 + yy945 := &x.PodEvictionTimeout + yym946 := z.EncBinary() + _ = yym946 + if false { + } else if z.HasExtensions() && z.EncExt(yy945) { + } else if !yym946 && z.IsJSONHandle() { + z.EncJSONMarshal(yy945) + } else { + z.EncFallback(yy945) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("podEvictionTimeout")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy947 := &x.PodEvictionTimeout + yym948 := z.EncBinary() + _ = yym948 + if false { + } else if z.HasExtensions() && z.EncExt(yy947) { + } else if !yym948 && z.IsJSONHandle() { + z.EncJSONMarshal(yy947) + } else { + z.EncFallback(yy947) + } + } + if yyr843 || yy2arr843 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym950 := z.EncBinary() + _ = yym950 if false { } else { r.EncodeFloat32(float32(x.DeletingPodsQps)) @@ 
-7963,17 +8045,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("deletingPodsQps")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym941 := z.EncBinary() - _ = yym941 + yym951 := z.EncBinary() + _ = yym951 if false { } else { r.EncodeFloat32(float32(x.DeletingPodsQps)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym943 := z.EncBinary() - _ = yym943 + yym953 := z.EncBinary() + _ = yym953 if false { } else { r.EncodeInt(int64(x.DeletingPodsBurst)) @@ -7982,44 +8064,44 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("deletingPodsBurst")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym944 := z.EncBinary() - _ = yym944 + yym954 := z.EncBinary() + _ = yym954 if false { } else { r.EncodeInt(int64(x.DeletingPodsBurst)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy946 := &x.NodeMonitorGracePeriod - yym947 := z.EncBinary() - _ = yym947 + yy956 := &x.NodeMonitorGracePeriod + yym957 := z.EncBinary() + _ = yym957 if false { - } else if z.HasExtensions() && z.EncExt(yy946) { - } else if !yym947 && z.IsJSONHandle() { - z.EncJSONMarshal(yy946) + } else if z.HasExtensions() && z.EncExt(yy956) { + } else if !yym957 && z.IsJSONHandle() { + z.EncJSONMarshal(yy956) } else { - z.EncFallback(yy946) + z.EncFallback(yy956) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodeMonitorGracePeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy948 := &x.NodeMonitorGracePeriod - yym949 := z.EncBinary() - _ = yym949 + yy958 := &x.NodeMonitorGracePeriod + yym959 := z.EncBinary() + _ = yym959 if false { - } else if z.HasExtensions() && z.EncExt(yy948) { - } else if !yym949 && z.IsJSONHandle() { - z.EncJSONMarshal(yy948) + } else if z.HasExtensions() && z.EncExt(yy958) { + } else if !yym959 && z.IsJSONHandle() { + z.EncJSONMarshal(yy958) } else { - z.EncFallback(yy948) + z.EncFallback(yy958) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym951 := z.EncBinary() - _ = yym951 + yym961 := z.EncBinary() + _ = yym961 if false { } else { r.EncodeInt(int64(x.RegisterRetryCount)) @@ -8028,71 +8110,71 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("registerRetryCount")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym952 := z.EncBinary() - _ = yym952 + yym962 := z.EncBinary() + _ = yym962 if false { } else { r.EncodeInt(int64(x.RegisterRetryCount)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy954 := &x.NodeStartupGracePeriod - yym955 := z.EncBinary() - _ = yym955 + yy964 := &x.NodeStartupGracePeriod + yym965 := z.EncBinary() + _ = yym965 if false { - } else if z.HasExtensions() && z.EncExt(yy954) { - } else if !yym955 && z.IsJSONHandle() { - z.EncJSONMarshal(yy954) + } else if z.HasExtensions() && z.EncExt(yy964) { + } else if !yym965 && z.IsJSONHandle() { + z.EncJSONMarshal(yy964) } else { - 
z.EncFallback(yy954) + z.EncFallback(yy964) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodeStartupGracePeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy956 := &x.NodeStartupGracePeriod - yym957 := z.EncBinary() - _ = yym957 + yy966 := &x.NodeStartupGracePeriod + yym967 := z.EncBinary() + _ = yym967 if false { - } else if z.HasExtensions() && z.EncExt(yy956) { - } else if !yym957 && z.IsJSONHandle() { - z.EncJSONMarshal(yy956) + } else if z.HasExtensions() && z.EncExt(yy966) { + } else if !yym967 && z.IsJSONHandle() { + z.EncJSONMarshal(yy966) } else { - z.EncFallback(yy956) + z.EncFallback(yy966) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy959 := &x.NodeMonitorPeriod - yym960 := z.EncBinary() - _ = yym960 + yy969 := &x.NodeMonitorPeriod + yym970 := z.EncBinary() + _ = yym970 if false { - } else if z.HasExtensions() && z.EncExt(yy959) { - } else if !yym960 && z.IsJSONHandle() { - z.EncJSONMarshal(yy959) + } else if z.HasExtensions() && z.EncExt(yy969) { + } else if !yym970 && z.IsJSONHandle() { + z.EncJSONMarshal(yy969) } else { - z.EncFallback(yy959) + z.EncFallback(yy969) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodeMonitorPeriod")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy961 := &x.NodeMonitorPeriod - yym962 := z.EncBinary() - _ = yym962 + yy971 := &x.NodeMonitorPeriod + yym972 := z.EncBinary() + _ = yym972 if false { - } else if z.HasExtensions() && z.EncExt(yy961) { - } else if !yym962 && z.IsJSONHandle() { - z.EncJSONMarshal(yy961) + } else if z.HasExtensions() && z.EncExt(yy971) { + } else if !yym972 && z.IsJSONHandle() { + z.EncJSONMarshal(yy971) } else { - z.EncFallback(yy961) + z.EncFallback(yy971) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym964 := z.EncBinary() - _ = yym964 + yym974 := z.EncBinary() + _ = yym974 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountKeyFile)) @@ -8101,17 +8183,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("serviceAccountKeyFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym965 := z.EncBinary() - _ = yym965 + yym975 := z.EncBinary() + _ = yym975 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ServiceAccountKeyFile)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym967 := z.EncBinary() - _ = yym967 + yym977 := z.EncBinary() + _ = yym977 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ClusterSigningCertFile)) @@ -8120,17 +8202,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("clusterSigningCertFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym968 := z.EncBinary() - _ = yym968 + yym978 := z.EncBinary() + _ = yym978 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ClusterSigningCertFile)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym970 := z.EncBinary() - _ = 
yym970 + yym980 := z.EncBinary() + _ = yym980 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ClusterSigningKeyFile)) @@ -8139,17 +8221,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("clusterSigningKeyFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym971 := z.EncBinary() - _ = yym971 + yym981 := z.EncBinary() + _ = yym981 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ClusterSigningKeyFile)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym973 := z.EncBinary() - _ = yym973 + yym983 := z.EncBinary() + _ = yym983 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ApproveAllKubeletCSRsForGroup)) @@ -8158,17 +8240,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("approveAllKubeletCSRsForGroup")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym974 := z.EncBinary() - _ = yym974 + yym984 := z.EncBinary() + _ = yym984 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ApproveAllKubeletCSRsForGroup)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym976 := z.EncBinary() - _ = yym976 + yym986 := z.EncBinary() + _ = yym986 if false { } else { r.EncodeBool(bool(x.EnableProfiling)) @@ -8177,17 +8259,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("enableProfiling")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym977 := z.EncBinary() - _ = yym977 + yym987 := z.EncBinary() + _ = yym987 if false { } else { r.EncodeBool(bool(x.EnableProfiling)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym979 := z.EncBinary() - _ = yym979 + yym989 := z.EncBinary() + _ = yym989 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) @@ -8196,17 +8278,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("clusterName")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym980 := z.EncBinary() - _ = yym980 + yym990 := z.EncBinary() + _ = yym990 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ClusterName)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym982 := z.EncBinary() - _ = yym982 + yym992 := z.EncBinary() + _ = yym992 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) @@ -8215,17 +8297,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("clusterCIDR")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym983 := z.EncBinary() - _ = yym983 + yym993 := z.EncBinary() + _ = yym993 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ClusterCIDR)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym985 := z.EncBinary() - _ = yym985 + yym995 := z.EncBinary() + _ = yym995 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ServiceCIDR)) @@ -8234,17 +8316,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("serviceCIDR")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym986 := z.EncBinary() - _ = yym986 + yym996 := z.EncBinary() + _ = yym996 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ServiceCIDR)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym988 := z.EncBinary() - _ = yym988 + yym998 := z.EncBinary() + _ = yym998 if false { } else { r.EncodeInt(int64(x.NodeCIDRMaskSize)) @@ -8253,17 +8335,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodeCIDRMaskSize")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym989 := z.EncBinary() - _ = yym989 + yym999 := z.EncBinary() + _ = yym999 if false { } else { r.EncodeInt(int64(x.NodeCIDRMaskSize)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym991 := z.EncBinary() - _ = yym991 + yym1001 := z.EncBinary() + _ = yym1001 if false { } else { r.EncodeBool(bool(x.AllocateNodeCIDRs)) @@ -8272,17 +8354,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("allocateNodeCIDRs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym992 := z.EncBinary() - _ = yym992 + yym1002 := z.EncBinary() + _ = yym1002 if false { } else { r.EncodeBool(bool(x.AllocateNodeCIDRs)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym994 := z.EncBinary() - _ = yym994 + yym1004 := z.EncBinary() + _ = yym1004 if false { } else { r.EncodeBool(bool(x.ConfigureCloudRoutes)) @@ -8291,17 +8373,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("configureCloudRoutes")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym995 := z.EncBinary() - _ = yym995 + yym1005 := z.EncBinary() + _ = yym1005 if false { } else { r.EncodeBool(bool(x.ConfigureCloudRoutes)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym997 := z.EncBinary() - _ = yym997 + yym1007 := z.EncBinary() + _ = yym1007 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RootCAFile)) @@ -8310,17 +8392,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("rootCAFile")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym998 := z.EncBinary() - _ = yym998 + yym1008 := z.EncBinary() + _ = yym1008 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.RootCAFile)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1000 := z.EncBinary() - _ = yym1000 + yym1010 := z.EncBinary() + _ = yym1010 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) @@ -8329,17 +8411,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("contentType")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1001 := z.EncBinary() - _ = yym1001 + yym1011 := z.EncBinary() + _ = yym1011 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.ContentType)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1003 := z.EncBinary() - _ = yym1003 + yym1013 := z.EncBinary() + _ = yym1013 if false { } else { r.EncodeFloat32(float32(x.KubeAPIQPS)) @@ -8348,17 +8430,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeAPIQPS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1004 := z.EncBinary() - _ = yym1004 + yym1014 := z.EncBinary() + _ = yym1014 if false { } else { r.EncodeFloat32(float32(x.KubeAPIQPS)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1006 := z.EncBinary() - _ = yym1006 + yym1016 := z.EncBinary() + _ = yym1016 if false { } else { r.EncodeInt(int64(x.KubeAPIBurst)) @@ -8367,66 +8449,66 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("kubeAPIBurst")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1007 := z.EncBinary() - _ = yym1007 + yym1017 := z.EncBinary() + _ = yym1017 if false { } else { r.EncodeInt(int64(x.KubeAPIBurst)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1009 := &x.LeaderElection - yy1009.CodecEncodeSelf(e) + yy1019 := &x.LeaderElection + yy1019.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("leaderElection")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1010 := &x.LeaderElection - yy1010.CodecEncodeSelf(e) + yy1020 := &x.LeaderElection + yy1020.CodecEncodeSelf(e) } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1012 := &x.VolumeConfiguration - yy1012.CodecEncodeSelf(e) + yy1022 := &x.VolumeConfiguration + yy1022.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("volumeConfiguration")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1013 := &x.VolumeConfiguration - yy1013.CodecEncodeSelf(e) + yy1023 := &x.VolumeConfiguration + yy1023.CodecEncodeSelf(e) } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1015 := &x.ControllerStartInterval - yym1016 := z.EncBinary() - _ = yym1016 + yy1025 := &x.ControllerStartInterval + yym1026 := z.EncBinary() + _ = yym1026 if false { - } else if z.HasExtensions() && z.EncExt(yy1015) { - } else if !yym1016 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1015) + } else if 
z.HasExtensions() && z.EncExt(yy1025) { + } else if !yym1026 && z.IsJSONHandle() { + z.EncJSONMarshal(yy1025) } else { - z.EncFallback(yy1015) + z.EncFallback(yy1025) } } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("controllerStartInterval")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1017 := &x.ControllerStartInterval - yym1018 := z.EncBinary() - _ = yym1018 + yy1027 := &x.ControllerStartInterval + yym1028 := z.EncBinary() + _ = yym1028 if false { - } else if z.HasExtensions() && z.EncExt(yy1017) { - } else if !yym1018 && z.IsJSONHandle() { - z.EncJSONMarshal(yy1017) + } else if z.HasExtensions() && z.EncExt(yy1027) { + } else if !yym1028 && z.IsJSONHandle() { + z.EncJSONMarshal(yy1027) } else { - z.EncFallback(yy1017) + z.EncFallback(yy1027) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1020 := z.EncBinary() - _ = yym1020 + yym1030 := z.EncBinary() + _ = yym1030 if false { } else { r.EncodeBool(bool(x.EnableGarbageCollector)) @@ -8435,17 +8517,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("enableGarbageCollector")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1021 := z.EncBinary() - _ = yym1021 + yym1031 := z.EncBinary() + _ = yym1031 if false { } else { r.EncodeBool(bool(x.EnableGarbageCollector)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1023 := z.EncBinary() - _ = yym1023 + yym1033 := z.EncBinary() + _ = yym1033 if false { } else { r.EncodeInt(int64(x.ConcurrentGCSyncs)) @@ -8454,17 +8536,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("concurrentGCSyncs")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1024 := z.EncBinary() - _ = yym1024 + yym1034 := z.EncBinary() + _ = yym1034 if false { } else { r.EncodeInt(int64(x.ConcurrentGCSyncs)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1026 := z.EncBinary() - _ = yym1026 + yym1036 := z.EncBinary() + _ = yym1036 if false { } else { r.EncodeFloat32(float32(x.NodeEvictionRate)) @@ -8473,17 +8555,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("nodeEvictionRate")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1027 := z.EncBinary() - _ = yym1027 + yym1037 := z.EncBinary() + _ = yym1037 if false { } else { r.EncodeFloat32(float32(x.NodeEvictionRate)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1029 := z.EncBinary() - _ = yym1029 + yym1039 := z.EncBinary() + _ = yym1039 if false { } else { r.EncodeFloat32(float32(x.SecondaryNodeEvictionRate)) @@ -8492,17 +8574,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("secondaryNodeEvictionRate")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1030 := z.EncBinary() - _ = yym1030 + yym1040 
:= z.EncBinary() + _ = yym1040 if false { } else { r.EncodeFloat32(float32(x.SecondaryNodeEvictionRate)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1032 := z.EncBinary() - _ = yym1032 + yym1042 := z.EncBinary() + _ = yym1042 if false { } else { r.EncodeInt(int64(x.LargeClusterSizeThreshold)) @@ -8511,17 +8593,17 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("largeClusterSizeThreshold")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1033 := z.EncBinary() - _ = yym1033 + yym1043 := z.EncBinary() + _ = yym1043 if false { } else { r.EncodeInt(int64(x.LargeClusterSizeThreshold)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1035 := z.EncBinary() - _ = yym1035 + yym1045 := z.EncBinary() + _ = yym1045 if false { } else { r.EncodeFloat32(float32(x.UnhealthyZoneThreshold)) @@ -8530,14 +8612,14 @@ func (x *KubeControllerManagerConfiguration) CodecEncodeSelf(e *codec1978.Encode z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("unhealthyZoneThreshold")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1036 := z.EncBinary() - _ = yym1036 + yym1046 := z.EncBinary() + _ = yym1046 if false { } else { r.EncodeFloat32(float32(x.UnhealthyZoneThreshold)) } } - if yyr833 || yy2arr833 { + if yyr843 || yy2arr843 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -8550,25 +8632,25 @@ func (x *KubeControllerManagerConfiguration) CodecDecodeSelf(d *codec1978.Decode var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1037 := z.DecBinary() - _ = yym1037 + yym1047 := z.DecBinary() + _ = yym1047 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1038 := r.ContainerType() - if yyct1038 == codecSelferValueTypeMap1234 { - yyl1038 := r.ReadMapStart() - if yyl1038 == 0 { + yyct1048 := r.ContainerType() + if yyct1048 == codecSelferValueTypeMap1234 { + yyl1048 := r.ReadMapStart() + if yyl1048 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1038, d) + x.codecDecodeSelfFromMap(yyl1048, d) } - } else if yyct1038 == codecSelferValueTypeArray1234 { - yyl1038 := r.ReadArrayStart() - if yyl1038 == 0 { + } else if yyct1048 == codecSelferValueTypeArray1234 { + yyl1048 := r.ReadArrayStart() + if yyl1048 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1038, d) + x.codecDecodeSelfFromArray(yyl1048, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -8580,12 +8662,12 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1039Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1039Slc - var yyhl1039 bool = l >= 0 - for yyj1039 := 0; ; yyj1039++ { - if yyhl1039 { - if yyj1039 >= l { + var yys1049Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys1049Slc + var yyhl1049 bool = l >= 0 + for yyj1049 := 0; ; yyj1049++ { + if yyhl1049 { + if yyj1049 >= l { break } } else { @@ -8594,10 +8676,10 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l 
int, d *co } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1039Slc = r.DecodeBytes(yys1039Slc, true, true) - yys1039 := string(yys1039Slc) + yys1049Slc = r.DecodeBytes(yys1049Slc, true, true) + yys1049 := string(yys1049Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1039 { + switch yys1049 { case "kind": if r.TryDecodeAsNil() { x.Kind = "" @@ -8716,90 +8798,90 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co if r.TryDecodeAsNil() { x.ServiceSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1059 := &x.ServiceSyncPeriod - yym1060 := z.DecBinary() - _ = yym1060 + yyv1069 := &x.ServiceSyncPeriod + yym1070 := z.DecBinary() + _ = yym1070 if false { - } else if z.HasExtensions() && z.DecExt(yyv1059) { - } else if !yym1060 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1059) + } else if z.HasExtensions() && z.DecExt(yyv1069) { + } else if !yym1070 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1069) } else { - z.DecFallback(yyv1059, false) + z.DecFallback(yyv1069, false) } } case "nodeSyncPeriod": if r.TryDecodeAsNil() { x.NodeSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1061 := &x.NodeSyncPeriod - yym1062 := z.DecBinary() - _ = yym1062 + yyv1071 := &x.NodeSyncPeriod + yym1072 := z.DecBinary() + _ = yym1072 if false { - } else if z.HasExtensions() && z.DecExt(yyv1061) { - } else if !yym1062 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1061) + } else if z.HasExtensions() && z.DecExt(yyv1071) { + } else if !yym1072 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1071) } else { - z.DecFallback(yyv1061, false) + z.DecFallback(yyv1071, false) } } case "resourceQuotaSyncPeriod": if r.TryDecodeAsNil() { x.ResourceQuotaSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1063 := &x.ResourceQuotaSyncPeriod - yym1064 := z.DecBinary() - _ = yym1064 + yyv1073 := &x.ResourceQuotaSyncPeriod + yym1074 := z.DecBinary() + _ = yym1074 if false { - } else if z.HasExtensions() && z.DecExt(yyv1063) { - } else if !yym1064 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1063) + } else if z.HasExtensions() && z.DecExt(yyv1073) { + } else if !yym1074 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1073) } else { - z.DecFallback(yyv1063, false) + z.DecFallback(yyv1073, false) } } case "namespaceSyncPeriod": if r.TryDecodeAsNil() { x.NamespaceSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1065 := &x.NamespaceSyncPeriod - yym1066 := z.DecBinary() - _ = yym1066 + yyv1075 := &x.NamespaceSyncPeriod + yym1076 := z.DecBinary() + _ = yym1076 if false { - } else if z.HasExtensions() && z.DecExt(yyv1065) { - } else if !yym1066 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1065) + } else if z.HasExtensions() && z.DecExt(yyv1075) { + } else if !yym1076 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1075) } else { - z.DecFallback(yyv1065, false) + z.DecFallback(yyv1075, false) } } case "pvClaimBinderSyncPeriod": if r.TryDecodeAsNil() { x.PVClaimBinderSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1067 := &x.PVClaimBinderSyncPeriod - yym1068 := z.DecBinary() - _ = yym1068 + yyv1077 := &x.PVClaimBinderSyncPeriod + yym1078 := z.DecBinary() + _ = yym1078 if false { - } else if z.HasExtensions() && z.DecExt(yyv1067) { - } else if !yym1068 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1067) + } else if z.HasExtensions() && z.DecExt(yyv1077) { + } else if !yym1078 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1077) } else { - z.DecFallback(yyv1067, false) + z.DecFallback(yyv1077, false) } } case "minResyncPeriod": if r.TryDecodeAsNil() { 
x.MinResyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1069 := &x.MinResyncPeriod - yym1070 := z.DecBinary() - _ = yym1070 + yyv1079 := &x.MinResyncPeriod + yym1080 := z.DecBinary() + _ = yym1080 if false { - } else if z.HasExtensions() && z.DecExt(yyv1069) { - } else if !yym1070 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1069) + } else if z.HasExtensions() && z.DecExt(yyv1079) { + } else if !yym1080 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1079) } else { - z.DecFallback(yyv1069, false) + z.DecFallback(yyv1079, false) } } case "terminatedPodGCThreshold": @@ -8812,45 +8894,45 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co if r.TryDecodeAsNil() { x.HorizontalPodAutoscalerSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1072 := &x.HorizontalPodAutoscalerSyncPeriod - yym1073 := z.DecBinary() - _ = yym1073 + yyv1082 := &x.HorizontalPodAutoscalerSyncPeriod + yym1083 := z.DecBinary() + _ = yym1083 if false { - } else if z.HasExtensions() && z.DecExt(yyv1072) { - } else if !yym1073 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1072) + } else if z.HasExtensions() && z.DecExt(yyv1082) { + } else if !yym1083 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1082) } else { - z.DecFallback(yyv1072, false) + z.DecFallback(yyv1082, false) } } case "deploymentControllerSyncPeriod": if r.TryDecodeAsNil() { x.DeploymentControllerSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1074 := &x.DeploymentControllerSyncPeriod - yym1075 := z.DecBinary() - _ = yym1075 + yyv1084 := &x.DeploymentControllerSyncPeriod + yym1085 := z.DecBinary() + _ = yym1085 if false { - } else if z.HasExtensions() && z.DecExt(yyv1074) { - } else if !yym1075 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1074) + } else if z.HasExtensions() && z.DecExt(yyv1084) { + } else if !yym1085 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1084) } else { - z.DecFallback(yyv1074, false) + z.DecFallback(yyv1084, false) } } case "podEvictionTimeout": if r.TryDecodeAsNil() { x.PodEvictionTimeout = pkg1_unversioned.Duration{} } else { - yyv1076 := &x.PodEvictionTimeout - yym1077 := z.DecBinary() - _ = yym1077 + yyv1086 := &x.PodEvictionTimeout + yym1087 := z.DecBinary() + _ = yym1087 if false { - } else if z.HasExtensions() && z.DecExt(yyv1076) { - } else if !yym1077 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1076) + } else if z.HasExtensions() && z.DecExt(yyv1086) { + } else if !yym1087 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1086) } else { - z.DecFallback(yyv1076, false) + z.DecFallback(yyv1086, false) } } case "deletingPodsQps": @@ -8869,15 +8951,15 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co if r.TryDecodeAsNil() { x.NodeMonitorGracePeriod = pkg1_unversioned.Duration{} } else { - yyv1080 := &x.NodeMonitorGracePeriod - yym1081 := z.DecBinary() - _ = yym1081 + yyv1090 := &x.NodeMonitorGracePeriod + yym1091 := z.DecBinary() + _ = yym1091 if false { - } else if z.HasExtensions() && z.DecExt(yyv1080) { - } else if !yym1081 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1080) + } else if z.HasExtensions() && z.DecExt(yyv1090) { + } else if !yym1091 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1090) } else { - z.DecFallback(yyv1080, false) + z.DecFallback(yyv1090, false) } } case "registerRetryCount": @@ -8890,30 +8972,30 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co if r.TryDecodeAsNil() { x.NodeStartupGracePeriod = pkg1_unversioned.Duration{} } else { - yyv1083 := &x.NodeStartupGracePeriod - yym1084 := 
z.DecBinary() - _ = yym1084 + yyv1093 := &x.NodeStartupGracePeriod + yym1094 := z.DecBinary() + _ = yym1094 if false { - } else if z.HasExtensions() && z.DecExt(yyv1083) { - } else if !yym1084 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1083) + } else if z.HasExtensions() && z.DecExt(yyv1093) { + } else if !yym1094 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1093) } else { - z.DecFallback(yyv1083, false) + z.DecFallback(yyv1093, false) } } case "nodeMonitorPeriod": if r.TryDecodeAsNil() { x.NodeMonitorPeriod = pkg1_unversioned.Duration{} } else { - yyv1085 := &x.NodeMonitorPeriod - yym1086 := z.DecBinary() - _ = yym1086 + yyv1095 := &x.NodeMonitorPeriod + yym1096 := z.DecBinary() + _ = yym1096 if false { - } else if z.HasExtensions() && z.DecExt(yyv1085) { - } else if !yym1086 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1085) + } else if z.HasExtensions() && z.DecExt(yyv1095) { + } else if !yym1096 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1095) } else { - z.DecFallback(yyv1085, false) + z.DecFallback(yyv1095, false) } } case "serviceAccountKeyFile": @@ -9010,29 +9092,29 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co if r.TryDecodeAsNil() { x.LeaderElection = LeaderElectionConfiguration{} } else { - yyv1102 := &x.LeaderElection - yyv1102.CodecDecodeSelf(d) + yyv1112 := &x.LeaderElection + yyv1112.CodecDecodeSelf(d) } case "volumeConfiguration": if r.TryDecodeAsNil() { x.VolumeConfiguration = VolumeConfiguration{} } else { - yyv1103 := &x.VolumeConfiguration - yyv1103.CodecDecodeSelf(d) + yyv1113 := &x.VolumeConfiguration + yyv1113.CodecDecodeSelf(d) } case "controllerStartInterval": if r.TryDecodeAsNil() { x.ControllerStartInterval = pkg1_unversioned.Duration{} } else { - yyv1104 := &x.ControllerStartInterval - yym1105 := z.DecBinary() - _ = yym1105 + yyv1114 := &x.ControllerStartInterval + yym1115 := z.DecBinary() + _ = yym1115 if false { - } else if z.HasExtensions() && z.DecExt(yyv1104) { - } else if !yym1105 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1104) + } else if z.HasExtensions() && z.DecExt(yyv1114) { + } else if !yym1115 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1114) } else { - z.DecFallback(yyv1104, false) + z.DecFallback(yyv1114, false) } } case "enableGarbageCollector": @@ -9072,9 +9154,9 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromMap(l int, d *co x.UnhealthyZoneThreshold = float32(r.DecodeFloat(true)) } default: - z.DecStructFieldNotFound(-1, yys1039) - } // end switch yys1039 - } // end for yyj1039 + z.DecStructFieldNotFound(-1, yys1049) + } // end switch yys1049 + } // end for yyj1049 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -9082,16 +9164,16 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1112 int - var yyb1112 bool - var yyhl1112 bool = l >= 0 - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + var yyj1122 int + var yyb1122 bool + var yyhl1122 bool = l >= 0 + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9101,13 +9183,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.Kind = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = 
r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9117,13 +9199,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.APIVersion = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9133,13 +9215,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.Port = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9149,13 +9231,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.Address = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9165,13 +9247,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.CloudProvider = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9181,13 +9263,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.CloudConfigFile = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9197,13 +9279,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ConcurrentEndpointSyncs = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9213,13 +9295,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ConcurrentRSSyncs = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9229,13 +9311,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ConcurrentRCSyncs = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ 
-9245,13 +9327,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ConcurrentServiceSyncs = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9261,13 +9343,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ConcurrentResourceQuotaSyncs = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9277,13 +9359,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ConcurrentDeploymentSyncs = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9293,13 +9375,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ConcurrentDaemonSetSyncs = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9309,13 +9391,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ConcurrentJobSyncs = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9325,13 +9407,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ConcurrentNamespaceSyncs = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9341,13 +9423,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ConcurrentSATokenSyncs = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9357,13 +9439,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.LookupCacheSizeForRC = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9373,13 +9455,13 @@ func (x *KubeControllerManagerConfiguration) 
codecDecodeSelfFromArray(l int, d * } else { x.LookupCacheSizeForRS = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9389,13 +9471,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.LookupCacheSizeForDaemonSet = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9403,24 +9485,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.ServiceSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1132 := &x.ServiceSyncPeriod - yym1133 := z.DecBinary() - _ = yym1133 + yyv1142 := &x.ServiceSyncPeriod + yym1143 := z.DecBinary() + _ = yym1143 if false { - } else if z.HasExtensions() && z.DecExt(yyv1132) { - } else if !yym1133 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1132) + } else if z.HasExtensions() && z.DecExt(yyv1142) { + } else if !yym1143 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1142) } else { - z.DecFallback(yyv1132, false) + z.DecFallback(yyv1142, false) } } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9428,24 +9510,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.NodeSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1134 := &x.NodeSyncPeriod - yym1135 := z.DecBinary() - _ = yym1135 + yyv1144 := &x.NodeSyncPeriod + yym1145 := z.DecBinary() + _ = yym1145 if false { - } else if z.HasExtensions() && z.DecExt(yyv1134) { - } else if !yym1135 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1134) + } else if z.HasExtensions() && z.DecExt(yyv1144) { + } else if !yym1145 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1144) } else { - z.DecFallback(yyv1134, false) + z.DecFallback(yyv1144, false) } } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9453,24 +9535,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.ResourceQuotaSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1136 := &x.ResourceQuotaSyncPeriod - yym1137 := z.DecBinary() - _ = yym1137 + yyv1146 := &x.ResourceQuotaSyncPeriod + yym1147 := z.DecBinary() + _ = yym1147 if false { - } else if z.HasExtensions() && z.DecExt(yyv1136) { - } else if !yym1137 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1136) + } else if z.HasExtensions() && z.DecExt(yyv1146) { + } else if !yym1147 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1146) } else { - z.DecFallback(yyv1136, false) + z.DecFallback(yyv1146, false) } } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if 
yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9478,24 +9560,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.NamespaceSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1138 := &x.NamespaceSyncPeriod - yym1139 := z.DecBinary() - _ = yym1139 + yyv1148 := &x.NamespaceSyncPeriod + yym1149 := z.DecBinary() + _ = yym1149 if false { - } else if z.HasExtensions() && z.DecExt(yyv1138) { - } else if !yym1139 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1138) + } else if z.HasExtensions() && z.DecExt(yyv1148) { + } else if !yym1149 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1148) } else { - z.DecFallback(yyv1138, false) + z.DecFallback(yyv1148, false) } } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9503,24 +9585,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.PVClaimBinderSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1140 := &x.PVClaimBinderSyncPeriod - yym1141 := z.DecBinary() - _ = yym1141 + yyv1150 := &x.PVClaimBinderSyncPeriod + yym1151 := z.DecBinary() + _ = yym1151 if false { - } else if z.HasExtensions() && z.DecExt(yyv1140) { - } else if !yym1141 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1140) + } else if z.HasExtensions() && z.DecExt(yyv1150) { + } else if !yym1151 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1150) } else { - z.DecFallback(yyv1140, false) + z.DecFallback(yyv1150, false) } } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9528,24 +9610,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.MinResyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1142 := &x.MinResyncPeriod - yym1143 := z.DecBinary() - _ = yym1143 + yyv1152 := &x.MinResyncPeriod + yym1153 := z.DecBinary() + _ = yym1153 if false { - } else if z.HasExtensions() && z.DecExt(yyv1142) { - } else if !yym1143 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1142) + } else if z.HasExtensions() && z.DecExt(yyv1152) { + } else if !yym1153 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1152) } else { - z.DecFallback(yyv1142, false) + z.DecFallback(yyv1152, false) } } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9555,13 +9637,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.TerminatedPodGCThreshold = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9569,24 +9651,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.HorizontalPodAutoscalerSyncPeriod = 
pkg1_unversioned.Duration{} } else { - yyv1145 := &x.HorizontalPodAutoscalerSyncPeriod - yym1146 := z.DecBinary() - _ = yym1146 + yyv1155 := &x.HorizontalPodAutoscalerSyncPeriod + yym1156 := z.DecBinary() + _ = yym1156 if false { - } else if z.HasExtensions() && z.DecExt(yyv1145) { - } else if !yym1146 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1145) + } else if z.HasExtensions() && z.DecExt(yyv1155) { + } else if !yym1156 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1155) } else { - z.DecFallback(yyv1145, false) + z.DecFallback(yyv1155, false) } } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9594,24 +9676,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.DeploymentControllerSyncPeriod = pkg1_unversioned.Duration{} } else { - yyv1147 := &x.DeploymentControllerSyncPeriod - yym1148 := z.DecBinary() - _ = yym1148 + yyv1157 := &x.DeploymentControllerSyncPeriod + yym1158 := z.DecBinary() + _ = yym1158 if false { - } else if z.HasExtensions() && z.DecExt(yyv1147) { - } else if !yym1148 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1147) + } else if z.HasExtensions() && z.DecExt(yyv1157) { + } else if !yym1158 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1157) } else { - z.DecFallback(yyv1147, false) + z.DecFallback(yyv1157, false) } } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9619,24 +9701,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.PodEvictionTimeout = pkg1_unversioned.Duration{} } else { - yyv1149 := &x.PodEvictionTimeout - yym1150 := z.DecBinary() - _ = yym1150 + yyv1159 := &x.PodEvictionTimeout + yym1160 := z.DecBinary() + _ = yym1160 if false { - } else if z.HasExtensions() && z.DecExt(yyv1149) { - } else if !yym1150 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1149) + } else if z.HasExtensions() && z.DecExt(yyv1159) { + } else if !yym1160 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1159) } else { - z.DecFallback(yyv1149, false) + z.DecFallback(yyv1159, false) } } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9646,13 +9728,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.DeletingPodsQps = float32(r.DecodeFloat(true)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9662,13 +9744,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.DeletingPodsBurst = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9676,24 +9758,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.NodeMonitorGracePeriod = pkg1_unversioned.Duration{} } else { - yyv1153 := &x.NodeMonitorGracePeriod - yym1154 := z.DecBinary() - _ = yym1154 + yyv1163 := &x.NodeMonitorGracePeriod + yym1164 := z.DecBinary() + _ = yym1164 if false { - } else if z.HasExtensions() && z.DecExt(yyv1153) { - } else if !yym1154 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1153) + } else if z.HasExtensions() && z.DecExt(yyv1163) { + } else if !yym1164 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1163) } else { - z.DecFallback(yyv1153, false) + z.DecFallback(yyv1163, false) } } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9703,13 +9785,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.RegisterRetryCount = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9717,24 +9799,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.NodeStartupGracePeriod = pkg1_unversioned.Duration{} } else { - yyv1156 := &x.NodeStartupGracePeriod - yym1157 := z.DecBinary() - _ = yym1157 + yyv1166 := &x.NodeStartupGracePeriod + yym1167 := z.DecBinary() + _ = yym1167 if false { - } else if z.HasExtensions() && z.DecExt(yyv1156) { - } else if !yym1157 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1156) + } else if z.HasExtensions() && z.DecExt(yyv1166) { + } else if !yym1167 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1166) } else { - z.DecFallback(yyv1156, false) + z.DecFallback(yyv1166, false) } } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9742,24 +9824,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.NodeMonitorPeriod = pkg1_unversioned.Duration{} } else { - yyv1158 := &x.NodeMonitorPeriod - yym1159 := z.DecBinary() - _ = yym1159 + yyv1168 := &x.NodeMonitorPeriod + yym1169 := z.DecBinary() + _ = yym1169 if false { - } else if z.HasExtensions() && z.DecExt(yyv1158) { - } else if !yym1159 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1158) + } else if z.HasExtensions() && z.DecExt(yyv1168) { + } else if !yym1169 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1168) } else { - z.DecFallback(yyv1158, false) + z.DecFallback(yyv1168, false) } } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9769,13 +9851,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ServiceAccountKeyFile = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 
{ - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9785,13 +9867,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ClusterSigningCertFile = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9801,13 +9883,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ClusterSigningKeyFile = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9817,13 +9899,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ApproveAllKubeletCSRsForGroup = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9833,13 +9915,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.EnableProfiling = bool(r.DecodeBool()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9849,13 +9931,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ClusterName = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9865,13 +9947,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ClusterCIDR = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9881,13 +9963,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ServiceCIDR = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9897,13 +9979,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.NodeCIDRMaskSize = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + 
yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9913,13 +9995,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.AllocateNodeCIDRs = bool(r.DecodeBool()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9929,13 +10011,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ConfigureCloudRoutes = bool(r.DecodeBool()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9945,13 +10027,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.RootCAFile = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9961,13 +10043,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ContentType = string(r.DecodeString()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9977,13 +10059,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.KubeAPIQPS = float32(r.DecodeFloat(true)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -9993,13 +10075,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.KubeAPIBurst = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10007,16 +10089,16 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.LeaderElection = LeaderElectionConfiguration{} } else { - yyv1175 := &x.LeaderElection - yyv1175.CodecDecodeSelf(d) + yyv1185 := &x.LeaderElection + yyv1185.CodecDecodeSelf(d) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10024,16 +10106,16 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.VolumeConfiguration = VolumeConfiguration{} } else { - yyv1176 := &x.VolumeConfiguration - yyv1176.CodecDecodeSelf(d) + yyv1186 := &x.VolumeConfiguration + 
yyv1186.CodecDecodeSelf(d) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10041,24 +10123,24 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * if r.TryDecodeAsNil() { x.ControllerStartInterval = pkg1_unversioned.Duration{} } else { - yyv1177 := &x.ControllerStartInterval - yym1178 := z.DecBinary() - _ = yym1178 + yyv1187 := &x.ControllerStartInterval + yym1188 := z.DecBinary() + _ = yym1188 if false { - } else if z.HasExtensions() && z.DecExt(yyv1177) { - } else if !yym1178 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv1177) + } else if z.HasExtensions() && z.DecExt(yyv1187) { + } else if !yym1188 && z.IsJSONHandle() { + z.DecJSONUnmarshal(yyv1187) } else { - z.DecFallback(yyv1177, false) + z.DecFallback(yyv1187, false) } } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10068,13 +10150,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.EnableGarbageCollector = bool(r.DecodeBool()) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10084,13 +10166,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.ConcurrentGCSyncs = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10100,13 +10182,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.NodeEvictionRate = float32(r.DecodeFloat(true)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10116,13 +10198,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.SecondaryNodeEvictionRate = float32(r.DecodeFloat(true)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10132,13 +10214,13 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * } else { x.LargeClusterSizeThreshold = int32(r.DecodeInt(32)) } - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10149,17 +10231,17 @@ func (x *KubeControllerManagerConfiguration) codecDecodeSelfFromArray(l int, d * 
x.UnhealthyZoneThreshold = float32(r.DecodeFloat(true)) } for { - yyj1112++ - if yyhl1112 { - yyb1112 = yyj1112 > l + yyj1122++ + if yyhl1122 { + yyb1122 = yyj1122 > l } else { - yyb1112 = r.CheckBreak() + yyb1122 = r.CheckBreak() } - if yyb1112 { + if yyb1122 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1112-1, "") + z.DecStructFieldNotFound(yyj1122-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -10171,33 +10253,33 @@ func (x *VolumeConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { if x == nil { r.EncodeNil() } else { - yym1185 := z.EncBinary() - _ = yym1185 + yym1195 := z.EncBinary() + _ = yym1195 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1186 := !z.EncBinary() - yy2arr1186 := z.EncBasicHandle().StructToArray - var yyq1186 [4]bool - _, _, _ = yysep1186, yyq1186, yy2arr1186 - const yyr1186 bool = false - var yynn1186 int - if yyr1186 || yy2arr1186 { + yysep1196 := !z.EncBinary() + yy2arr1196 := z.EncBasicHandle().StructToArray + var yyq1196 [4]bool + _, _, _ = yysep1196, yyq1196, yy2arr1196 + const yyr1196 bool = false + var yynn1196 int + if yyr1196 || yy2arr1196 { r.EncodeArrayStart(4) } else { - yynn1186 = 4 - for _, b := range yyq1186 { + yynn1196 = 4 + for _, b := range yyq1196 { if b { - yynn1186++ + yynn1196++ } } - r.EncodeMapStart(yynn1186) - yynn1186 = 0 + r.EncodeMapStart(yynn1196) + yynn1196 = 0 } - if yyr1186 || yy2arr1186 { + if yyr1196 || yy2arr1196 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1188 := z.EncBinary() - _ = yym1188 + yym1198 := z.EncBinary() + _ = yym1198 if false { } else { r.EncodeBool(bool(x.EnableHostPathProvisioning)) @@ -10206,17 +10288,17 @@ func (x *VolumeConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("enableHostPathProvisioning")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1189 := z.EncBinary() - _ = yym1189 + yym1199 := z.EncBinary() + _ = yym1199 if false { } else { r.EncodeBool(bool(x.EnableHostPathProvisioning)) } } - if yyr1186 || yy2arr1186 { + if yyr1196 || yy2arr1196 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1191 := z.EncBinary() - _ = yym1191 + yym1201 := z.EncBinary() + _ = yym1201 if false { } else { r.EncodeBool(bool(x.EnableDynamicProvisioning)) @@ -10225,28 +10307,28 @@ func (x *VolumeConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("enableDynamicProvisioning")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1192 := z.EncBinary() - _ = yym1192 + yym1202 := z.EncBinary() + _ = yym1202 if false { } else { r.EncodeBool(bool(x.EnableDynamicProvisioning)) } } - if yyr1186 || yy2arr1186 { + if yyr1196 || yy2arr1196 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1194 := &x.PersistentVolumeRecyclerConfiguration - yy1194.CodecEncodeSelf(e) + yy1204 := &x.PersistentVolumeRecyclerConfiguration + yy1204.CodecEncodeSelf(e) } else { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("persitentVolumeRecyclerConfiguration")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1195 := &x.PersistentVolumeRecyclerConfiguration - yy1195.CodecEncodeSelf(e) + yy1205 := &x.PersistentVolumeRecyclerConfiguration + yy1205.CodecEncodeSelf(e) } - if yyr1186 || yy2arr1186 { + 
if yyr1196 || yy2arr1196 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1197 := z.EncBinary() - _ = yym1197 + yym1207 := z.EncBinary() + _ = yym1207 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FlexVolumePluginDir)) @@ -10255,14 +10337,14 @@ func (x *VolumeConfiguration) CodecEncodeSelf(e *codec1978.Encoder) { z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("flexVolumePluginDir")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1198 := z.EncBinary() - _ = yym1198 + yym1208 := z.EncBinary() + _ = yym1208 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.FlexVolumePluginDir)) } } - if yyr1186 || yy2arr1186 { + if yyr1196 || yy2arr1196 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -10275,25 +10357,25 @@ func (x *VolumeConfiguration) CodecDecodeSelf(d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1199 := z.DecBinary() - _ = yym1199 + yym1209 := z.DecBinary() + _ = yym1209 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1200 := r.ContainerType() - if yyct1200 == codecSelferValueTypeMap1234 { - yyl1200 := r.ReadMapStart() - if yyl1200 == 0 { + yyct1210 := r.ContainerType() + if yyct1210 == codecSelferValueTypeMap1234 { + yyl1210 := r.ReadMapStart() + if yyl1210 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1200, d) + x.codecDecodeSelfFromMap(yyl1210, d) } - } else if yyct1200 == codecSelferValueTypeArray1234 { - yyl1200 := r.ReadArrayStart() - if yyl1200 == 0 { + } else if yyct1210 == codecSelferValueTypeArray1234 { + yyl1210 := r.ReadArrayStart() + if yyl1210 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1200, d) + x.codecDecodeSelfFromArray(yyl1210, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -10305,12 +10387,12 @@ func (x *VolumeConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1201Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1201Slc - var yyhl1201 bool = l >= 0 - for yyj1201 := 0; ; yyj1201++ { - if yyhl1201 { - if yyj1201 >= l { + var yys1211Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys1211Slc + var yyhl1211 bool = l >= 0 + for yyj1211 := 0; ; yyj1211++ { + if yyhl1211 { + if yyj1211 >= l { break } } else { @@ -10319,10 +10401,10 @@ func (x *VolumeConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1201Slc = r.DecodeBytes(yys1201Slc, true, true) - yys1201 := string(yys1201Slc) + yys1211Slc = r.DecodeBytes(yys1211Slc, true, true) + yys1211 := string(yys1211Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1201 { + switch yys1211 { case "enableHostPathProvisioning": if r.TryDecodeAsNil() { x.EnableHostPathProvisioning = false @@ -10339,8 +10421,8 @@ func (x *VolumeConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder if r.TryDecodeAsNil() { x.PersistentVolumeRecyclerConfiguration = PersistentVolumeRecyclerConfiguration{} } else { - yyv1204 := &x.PersistentVolumeRecyclerConfiguration - yyv1204.CodecDecodeSelf(d) + yyv1214 := &x.PersistentVolumeRecyclerConfiguration + yyv1214.CodecDecodeSelf(d) } case 
"flexVolumePluginDir": if r.TryDecodeAsNil() { @@ -10349,9 +10431,9 @@ func (x *VolumeConfiguration) codecDecodeSelfFromMap(l int, d *codec1978.Decoder x.FlexVolumePluginDir = string(r.DecodeString()) } default: - z.DecStructFieldNotFound(-1, yys1201) - } // end switch yys1201 - } // end for yyj1201 + z.DecStructFieldNotFound(-1, yys1211) + } // end switch yys1211 + } // end for yyj1211 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -10359,16 +10441,16 @@ func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decod var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1206 int - var yyb1206 bool - var yyhl1206 bool = l >= 0 - yyj1206++ - if yyhl1206 { - yyb1206 = yyj1206 > l + var yyj1216 int + var yyb1216 bool + var yyhl1216 bool = l >= 0 + yyj1216++ + if yyhl1216 { + yyb1216 = yyj1216 > l } else { - yyb1206 = r.CheckBreak() + yyb1216 = r.CheckBreak() } - if yyb1206 { + if yyb1216 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10378,13 +10460,13 @@ func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decod } else { x.EnableHostPathProvisioning = bool(r.DecodeBool()) } - yyj1206++ - if yyhl1206 { - yyb1206 = yyj1206 > l + yyj1216++ + if yyhl1216 { + yyb1216 = yyj1216 > l } else { - yyb1206 = r.CheckBreak() + yyb1216 = r.CheckBreak() } - if yyb1206 { + if yyb1216 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10394,13 +10476,13 @@ func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decod } else { x.EnableDynamicProvisioning = bool(r.DecodeBool()) } - yyj1206++ - if yyhl1206 { - yyb1206 = yyj1206 > l + yyj1216++ + if yyhl1216 { + yyb1216 = yyj1216 > l } else { - yyb1206 = r.CheckBreak() + yyb1216 = r.CheckBreak() } - if yyb1206 { + if yyb1216 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10408,16 +10490,16 @@ func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decod if r.TryDecodeAsNil() { x.PersistentVolumeRecyclerConfiguration = PersistentVolumeRecyclerConfiguration{} } else { - yyv1209 := &x.PersistentVolumeRecyclerConfiguration - yyv1209.CodecDecodeSelf(d) + yyv1219 := &x.PersistentVolumeRecyclerConfiguration + yyv1219.CodecDecodeSelf(d) } - yyj1206++ - if yyhl1206 { - yyb1206 = yyj1206 > l + yyj1216++ + if yyhl1216 { + yyb1216 = yyj1216 > l } else { - yyb1206 = r.CheckBreak() + yyb1216 = r.CheckBreak() } - if yyb1206 { + if yyb1216 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10428,17 +10510,17 @@ func (x *VolumeConfiguration) codecDecodeSelfFromArray(l int, d *codec1978.Decod x.FlexVolumePluginDir = string(r.DecodeString()) } for { - yyj1206++ - if yyhl1206 { - yyb1206 = yyj1206 > l + yyj1216++ + if yyhl1216 { + yyb1216 = yyj1216 > l } else { - yyb1206 = r.CheckBreak() + yyb1216 = r.CheckBreak() } - if yyb1206 { + if yyb1216 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1206-1, "") + z.DecStructFieldNotFound(yyj1216-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -10450,33 +10532,33 @@ func (x *PersistentVolumeRecyclerConfiguration) CodecEncodeSelf(e *codec1978.Enc if x == nil { r.EncodeNil() } else { - yym1211 := z.EncBinary() - _ = yym1211 + yym1221 := z.EncBinary() + _ = yym1221 if false { } else if z.HasExtensions() && z.EncExt(x) { } else { - yysep1212 := !z.EncBinary() - yy2arr1212 := z.EncBasicHandle().StructToArray - var yyq1212 [7]bool - _, _, _ = 
yysep1212, yyq1212, yy2arr1212 - const yyr1212 bool = false - var yynn1212 int - if yyr1212 || yy2arr1212 { + yysep1222 := !z.EncBinary() + yy2arr1222 := z.EncBasicHandle().StructToArray + var yyq1222 [7]bool + _, _, _ = yysep1222, yyq1222, yy2arr1222 + const yyr1222 bool = false + var yynn1222 int + if yyr1222 || yy2arr1222 { r.EncodeArrayStart(7) } else { - yynn1212 = 7 - for _, b := range yyq1212 { + yynn1222 = 7 + for _, b := range yyq1222 { if b { - yynn1212++ + yynn1222++ } } - r.EncodeMapStart(yynn1212) - yynn1212 = 0 + r.EncodeMapStart(yynn1222) + yynn1222 = 0 } - if yyr1212 || yy2arr1212 { + if yyr1222 || yy2arr1222 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1214 := z.EncBinary() - _ = yym1214 + yym1224 := z.EncBinary() + _ = yym1224 if false { } else { r.EncodeInt(int64(x.MaximumRetry)) @@ -10485,17 +10567,17 @@ func (x *PersistentVolumeRecyclerConfiguration) CodecEncodeSelf(e *codec1978.Enc z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("maximumRetry")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1215 := z.EncBinary() - _ = yym1215 + yym1225 := z.EncBinary() + _ = yym1225 if false { } else { r.EncodeInt(int64(x.MaximumRetry)) } } - if yyr1212 || yy2arr1212 { + if yyr1222 || yy2arr1222 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1217 := z.EncBinary() - _ = yym1217 + yym1227 := z.EncBinary() + _ = yym1227 if false { } else { r.EncodeInt(int64(x.MinimumTimeoutNFS)) @@ -10504,17 +10586,17 @@ func (x *PersistentVolumeRecyclerConfiguration) CodecEncodeSelf(e *codec1978.Enc z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("minimumTimeoutNFS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1218 := z.EncBinary() - _ = yym1218 + yym1228 := z.EncBinary() + _ = yym1228 if false { } else { r.EncodeInt(int64(x.MinimumTimeoutNFS)) } } - if yyr1212 || yy2arr1212 { + if yyr1222 || yy2arr1222 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1220 := z.EncBinary() - _ = yym1220 + yym1230 := z.EncBinary() + _ = yym1230 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathNFS)) @@ -10523,17 +10605,17 @@ func (x *PersistentVolumeRecyclerConfiguration) CodecEncodeSelf(e *codec1978.Enc z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podTemplateFilePathNFS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1221 := z.EncBinary() - _ = yym1221 + yym1231 := z.EncBinary() + _ = yym1231 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathNFS)) } } - if yyr1212 || yy2arr1212 { + if yyr1222 || yy2arr1222 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1223 := z.EncBinary() - _ = yym1223 + yym1233 := z.EncBinary() + _ = yym1233 if false { } else { r.EncodeInt(int64(x.IncrementTimeoutNFS)) @@ -10542,17 +10624,17 @@ func (x *PersistentVolumeRecyclerConfiguration) CodecEncodeSelf(e *codec1978.Enc z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("incrementTimeoutNFS")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1224 := z.EncBinary() - _ = yym1224 + yym1234 := z.EncBinary() + _ = yym1234 if false { } else { r.EncodeInt(int64(x.IncrementTimeoutNFS)) } } - if yyr1212 || yy2arr1212 { + if yyr1222 || yy2arr1222 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1226 := 
z.EncBinary() - _ = yym1226 + yym1236 := z.EncBinary() + _ = yym1236 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathHostPath)) @@ -10561,17 +10643,17 @@ func (x *PersistentVolumeRecyclerConfiguration) CodecEncodeSelf(e *codec1978.Enc z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("podTemplateFilePathHostPath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1227 := z.EncBinary() - _ = yym1227 + yym1237 := z.EncBinary() + _ = yym1237 if false { } else { r.EncodeString(codecSelferC_UTF81234, string(x.PodTemplateFilePathHostPath)) } } - if yyr1212 || yy2arr1212 { + if yyr1222 || yy2arr1222 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1229 := z.EncBinary() - _ = yym1229 + yym1239 := z.EncBinary() + _ = yym1239 if false { } else { r.EncodeInt(int64(x.MinimumTimeoutHostPath)) @@ -10580,17 +10662,17 @@ func (x *PersistentVolumeRecyclerConfiguration) CodecEncodeSelf(e *codec1978.Enc z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("minimumTimeoutHostPath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1230 := z.EncBinary() - _ = yym1230 + yym1240 := z.EncBinary() + _ = yym1240 if false { } else { r.EncodeInt(int64(x.MinimumTimeoutHostPath)) } } - if yyr1212 || yy2arr1212 { + if yyr1222 || yy2arr1222 { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1232 := z.EncBinary() - _ = yym1232 + yym1242 := z.EncBinary() + _ = yym1242 if false { } else { r.EncodeInt(int64(x.IncrementTimeoutHostPath)) @@ -10599,14 +10681,14 @@ func (x *PersistentVolumeRecyclerConfiguration) CodecEncodeSelf(e *codec1978.Enc z.EncSendContainerState(codecSelfer_containerMapKey1234) r.EncodeString(codecSelferC_UTF81234, string("incrementTimeoutHostPath")) z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1233 := z.EncBinary() - _ = yym1233 + yym1243 := z.EncBinary() + _ = yym1243 if false { } else { r.EncodeInt(int64(x.IncrementTimeoutHostPath)) } } - if yyr1212 || yy2arr1212 { + if yyr1222 || yy2arr1222 { z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } else { z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -10619,25 +10701,25 @@ func (x *PersistentVolumeRecyclerConfiguration) CodecDecodeSelf(d *codec1978.Dec var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yym1234 := z.DecBinary() - _ = yym1234 + yym1244 := z.DecBinary() + _ = yym1244 if false { } else if z.HasExtensions() && z.DecExt(x) { } else { - yyct1235 := r.ContainerType() - if yyct1235 == codecSelferValueTypeMap1234 { - yyl1235 := r.ReadMapStart() - if yyl1235 == 0 { + yyct1245 := r.ContainerType() + if yyct1245 == codecSelferValueTypeMap1234 { + yyl1245 := r.ReadMapStart() + if yyl1245 == 0 { z.DecSendContainerState(codecSelfer_containerMapEnd1234) } else { - x.codecDecodeSelfFromMap(yyl1235, d) + x.codecDecodeSelfFromMap(yyl1245, d) } - } else if yyct1235 == codecSelferValueTypeArray1234 { - yyl1235 := r.ReadArrayStart() - if yyl1235 == 0 { + } else if yyct1245 == codecSelferValueTypeArray1234 { + yyl1245 := r.ReadArrayStart() + if yyl1245 == 0 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } else { - x.codecDecodeSelfFromArray(yyl1235, d) + x.codecDecodeSelfFromArray(yyl1245, d) } } else { panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) @@ -10649,12 +10731,12 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromMap(l int, d var h codecSelfer1234 
z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yys1236Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1236Slc - var yyhl1236 bool = l >= 0 - for yyj1236 := 0; ; yyj1236++ { - if yyhl1236 { - if yyj1236 >= l { + var yys1246Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys1246Slc + var yyhl1246 bool = l >= 0 + for yyj1246 := 0; ; yyj1246++ { + if yyhl1246 { + if yyj1246 >= l { break } } else { @@ -10663,10 +10745,10 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromMap(l int, d } } z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1236Slc = r.DecodeBytes(yys1236Slc, true, true) - yys1236 := string(yys1236Slc) + yys1246Slc = r.DecodeBytes(yys1246Slc, true, true) + yys1246 := string(yys1246Slc) z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1236 { + switch yys1246 { case "maximumRetry": if r.TryDecodeAsNil() { x.MaximumRetry = 0 @@ -10710,9 +10792,9 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromMap(l int, d x.IncrementTimeoutHostPath = int32(r.DecodeInt(32)) } default: - z.DecStructFieldNotFound(-1, yys1236) - } // end switch yys1236 - } // end for yyj1236 + z.DecStructFieldNotFound(-1, yys1246) + } // end switch yys1246 + } // end for yyj1246 z.DecSendContainerState(codecSelfer_containerMapEnd1234) } @@ -10720,16 +10802,16 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - var yyj1244 int - var yyb1244 bool - var yyhl1244 bool = l >= 0 - yyj1244++ - if yyhl1244 { - yyb1244 = yyj1244 > l + var yyj1254 int + var yyb1254 bool + var yyhl1254 bool = l >= 0 + yyj1254++ + if yyhl1254 { + yyb1254 = yyj1254 > l } else { - yyb1244 = r.CheckBreak() + yyb1254 = r.CheckBreak() } - if yyb1244 { + if yyb1254 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10739,13 +10821,13 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, } else { x.MaximumRetry = int32(r.DecodeInt(32)) } - yyj1244++ - if yyhl1244 { - yyb1244 = yyj1244 > l + yyj1254++ + if yyhl1254 { + yyb1254 = yyj1254 > l } else { - yyb1244 = r.CheckBreak() + yyb1254 = r.CheckBreak() } - if yyb1244 { + if yyb1254 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10755,13 +10837,13 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, } else { x.MinimumTimeoutNFS = int32(r.DecodeInt(32)) } - yyj1244++ - if yyhl1244 { - yyb1244 = yyj1244 > l + yyj1254++ + if yyhl1254 { + yyb1254 = yyj1254 > l } else { - yyb1244 = r.CheckBreak() + yyb1254 = r.CheckBreak() } - if yyb1244 { + if yyb1254 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10771,13 +10853,13 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, } else { x.PodTemplateFilePathNFS = string(r.DecodeString()) } - yyj1244++ - if yyhl1244 { - yyb1244 = yyj1244 > l + yyj1254++ + if yyhl1254 { + yyb1254 = yyj1254 > l } else { - yyb1244 = r.CheckBreak() + yyb1254 = r.CheckBreak() } - if yyb1244 { + if yyb1254 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10787,13 +10869,13 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, } else { x.IncrementTimeoutNFS = int32(r.DecodeInt(32)) } - yyj1244++ - if yyhl1244 { - yyb1244 = yyj1244 > l + yyj1254++ + if yyhl1254 { + yyb1254 = yyj1254 > l } else { - yyb1244 = r.CheckBreak() + yyb1254 = r.CheckBreak() 
} - if yyb1244 { + if yyb1254 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10803,13 +10885,13 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, } else { x.PodTemplateFilePathHostPath = string(r.DecodeString()) } - yyj1244++ - if yyhl1244 { - yyb1244 = yyj1244 > l + yyj1254++ + if yyhl1254 { + yyb1254 = yyj1254 > l } else { - yyb1244 = r.CheckBreak() + yyb1254 = r.CheckBreak() } - if yyb1244 { + if yyb1254 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10819,13 +10901,13 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, } else { x.MinimumTimeoutHostPath = int32(r.DecodeInt(32)) } - yyj1244++ - if yyhl1244 { - yyb1244 = yyj1244 > l + yyj1254++ + if yyhl1254 { + yyb1254 = yyj1254 > l } else { - yyb1244 = r.CheckBreak() + yyb1254 = r.CheckBreak() } - if yyb1244 { + if yyb1254 { z.DecSendContainerState(codecSelfer_containerArrayEnd1234) return } @@ -10836,17 +10918,17 @@ func (x *PersistentVolumeRecyclerConfiguration) codecDecodeSelfFromArray(l int, x.IncrementTimeoutHostPath = int32(r.DecodeInt(32)) } for { - yyj1244++ - if yyhl1244 { - yyb1244 = yyj1244 > l + yyj1254++ + if yyhl1254 { + yyb1254 = yyj1254 > l } else { - yyb1244 = r.CheckBreak() + yyb1254 = r.CheckBreak() } - if yyb1244 { + if yyb1254 { break } z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1244-1, "") + z.DecStructFieldNotFound(yyj1254-1, "") } z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -10856,20 +10938,20 @@ func (x codecSelfer1234) encconfig_ConfigurationMap(v pkg2_config.ConfigurationM z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeMapStart(len(v)) - for yyk1252, yyv1252 := range v { + for yyk1262, yyv1262 := range v { z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym1253 := z.EncBinary() - _ = yym1253 + yym1263 := z.EncBinary() + _ = yym1263 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk1252)) + r.EncodeString(codecSelferC_UTF81234, string(yyk1262)) } z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1254 := z.EncBinary() - _ = yym1254 + yym1264 := z.EncBinary() + _ = yym1264 if false { } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv1252)) + r.EncodeString(codecSelferC_UTF81234, string(yyv1262)) } } z.EncSendContainerState(codecSelfer_containerMapEnd1234) @@ -10880,63 +10962,63 @@ func (x codecSelfer1234) decconfig_ConfigurationMap(v *pkg2_config.Configuration z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1255 := *v - yyl1255 := r.ReadMapStart() - yybh1255 := z.DecBasicHandle() - if yyv1255 == nil { - yyrl1255, _ := z.DecInferLen(yyl1255, yybh1255.MaxInitLen, 32) - yyv1255 = make(map[string]string, yyrl1255) - *v = yyv1255 - } - var yymk1255 string - var yymv1255 string - var yymg1255 bool - if yybh1255.MapValueReset { - } - if yyl1255 > 0 { - for yyj1255 := 0; yyj1255 < yyl1255; yyj1255++ { + yyv1265 := *v + yyl1265 := r.ReadMapStart() + yybh1265 := z.DecBasicHandle() + if yyv1265 == nil { + yyrl1265, _ := z.DecInferLen(yyl1265, yybh1265.MaxInitLen, 32) + yyv1265 = make(map[string]string, yyrl1265) + *v = yyv1265 + } + var yymk1265 string + var yymv1265 string + var yymg1265 bool + if yybh1265.MapValueReset { + } + if yyl1265 > 0 { + for yyj1265 := 0; yyj1265 < yyl1265; yyj1265++ { z.DecSendContainerState(codecSelfer_containerMapKey1234) if r.TryDecodeAsNil() { - yymk1255 = "" + yymk1265 = "" } else { - yymk1255 = string(r.DecodeString()) + 
yymk1265 = string(r.DecodeString()) } - if yymg1255 { - yymv1255 = yyv1255[yymk1255] + if yymg1265 { + yymv1265 = yyv1265[yymk1265] } z.DecSendContainerState(codecSelfer_containerMapValue1234) if r.TryDecodeAsNil() { - yymv1255 = "" + yymv1265 = "" } else { - yymv1255 = string(r.DecodeString()) + yymv1265 = string(r.DecodeString()) } - if yyv1255 != nil { - yyv1255[yymk1255] = yymv1255 + if yyv1265 != nil { + yyv1265[yymk1265] = yymv1265 } } - } else if yyl1255 < 0 { - for yyj1255 := 0; !r.CheckBreak(); yyj1255++ { + } else if yyl1265 < 0 { + for yyj1265 := 0; !r.CheckBreak(); yyj1265++ { z.DecSendContainerState(codecSelfer_containerMapKey1234) if r.TryDecodeAsNil() { - yymk1255 = "" + yymk1265 = "" } else { - yymk1255 = string(r.DecodeString()) + yymk1265 = string(r.DecodeString()) } - if yymg1255 { - yymv1255 = yyv1255[yymk1255] + if yymg1265 { + yymv1265 = yyv1265[yymk1265] } z.DecSendContainerState(codecSelfer_containerMapValue1234) if r.TryDecodeAsNil() { - yymv1255 = "" + yymv1265 = "" } else { - yymv1255 = string(r.DecodeString()) + yymv1265 = string(r.DecodeString()) } - if yyv1255 != nil { - yyv1255[yymk1255] = yymv1255 + if yyv1265 != nil { + yyv1265[yymk1265] = yymv1265 } } } // else len==0: TODO: Should we clear map entries? diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/types.go b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/types.go index f64c30a5cb82..9eadd6b746d4 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/types.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/types.go @@ -257,8 +257,14 @@ type KubeletConfiguration struct { // computed (such as IPSEC). NetworkPluginMTU int32 `json:"networkPluginMTU"` // networkPluginDir is the full path of the directory in which to search - // for network plugins + // for network plugins (and, for backwards-compat, CNI config files) NetworkPluginDir string `json:"networkPluginDir"` + // CNIConfDir is the full path of the directory in which to search for + // CNI config files + CNIConfDir string `json:"cniConfDir"` + // CNIBinDir is the full path of the directory in which to search for + // CNI plugin binaries + CNIBinDir string `json:"cniBinDir"` // volumePluginDir is the full path of the directory in which to search // for additional third party volume plugins VolumePluginDir string `json:"volumePluginDir"` @@ -398,12 +404,12 @@ type KubeletConfiguration struct { // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs // that describe resources reserved for non-kubernetes components. // Currently only cpu and memory are supported. [default=none] - // See http://releases.k8s.io/HEAD/docs/user-guide/compute-resources.md for more detail. + // See http://releases.k8s.io/release-1.4/docs/user-guide/compute-resources.md for more detail. SystemReserved utilconfig.ConfigurationMap `json:"systemReserved"` // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs // that describe resources reserved for kubernetes system components. // Currently only cpu and memory are supported. [default=none] - // See http://releases.k8s.io/HEAD/docs/user-guide/compute-resources.md for more detail. + // See http://releases.k8s.io/release-1.4/docs/user-guide/compute-resources.md for more detail. 
KubeReserved utilconfig.ConfigurationMap `json:"kubeReserved"` // Default behaviour for kernel tuning ProtectKernelDefaults bool `json:"protectKernelDefaults"` diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/defaults.go b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/defaults.go index 3c889d912dfe..3779bb8aadb2 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/defaults.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/defaults.go @@ -249,9 +249,6 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) { if obj.MinimumGCAge == zeroDuration { obj.MinimumGCAge = unversioned.Duration{Duration: 0} } - if obj.NetworkPluginDir == "" { - obj.NetworkPluginDir = "/usr/libexec/kubernetes/kubelet-plugins/net/exec/" - } if obj.NonMasqueradeCIDR == "" { obj.NonMasqueradeCIDR = "10.0.0.0/8" } diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/types.go b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/types.go index adb6d9b7ce85..8c206efb5080 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/types.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/types.go @@ -308,8 +308,14 @@ type KubeletConfiguration struct { // various events in kubelet/pod lifecycle NetworkPluginName string `json:"networkPluginName"` // networkPluginDir is the full path of the directory in which to search - // for network plugins + // for network plugins (and, for backwards-compat, CNI config files) NetworkPluginDir string `json:"networkPluginDir"` + // CNIConfDir is the full path of the directory in which to search for + // CNI config files + CNIConfDir string `json:"cniConfDir"` + // CNIBinDir is the full path of the directory in which to search for + // CNI plugin binaries + CNIBinDir string `json:"cniBinDir"` // networkPluginMTU is the MTU to be passed to the network plugin, // and overrides the default MTU for cases where it cannot be automatically // computed (such as IPSEC). @@ -453,12 +459,12 @@ type KubeletConfiguration struct { // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs // that describe resources reserved for non-kubernetes components. // Currently only cpu and memory are supported. [default=none] - // See http://releases.k8s.io/HEAD/docs/user-guide/compute-resources.md for more detail. + // See http://releases.k8s.io/release-1.4/docs/user-guide/compute-resources.md for more detail. SystemReserved map[string]string `json:"systemReserved"` // A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs // that describe resources reserved for kubernetes system components. // Currently only cpu and memory are supported. [default=none] - // See http://releases.k8s.io/HEAD/docs/user-guide/compute-resources.md for more detail. + // See http://releases.k8s.io/release-1.4/docs/user-guide/compute-resources.md for more detail. 
KubeReserved map[string]string `json:"kubeReserved"` // Default behaviour for kernel tuning ProtectKernelDefaults bool `json:"protectKernelDefaults"` diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go index 468941a4f2c9..bfd31b1252f2 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/zz_generated.conversion.go @@ -233,6 +233,8 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfigu out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod out.NetworkPluginName = in.NetworkPluginName out.NetworkPluginDir = in.NetworkPluginDir + out.CNIConfDir = in.CNIConfDir + out.CNIBinDir = in.CNIBinDir out.NetworkPluginMTU = in.NetworkPluginMTU out.VolumePluginDir = in.VolumePluginDir out.CloudProvider = in.CloudProvider @@ -412,6 +414,8 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigu out.NetworkPluginName = in.NetworkPluginName out.NetworkPluginMTU = in.NetworkPluginMTU out.NetworkPluginDir = in.NetworkPluginDir + out.CNIConfDir = in.CNIConfDir + out.CNIBinDir = in.CNIBinDir out.VolumePluginDir = in.VolumePluginDir out.CloudProvider = in.CloudProvider out.CloudConfigFile = in.CloudConfigFile diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go index 22aa9c8ed923..74cc2bdc013a 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go @@ -239,6 +239,8 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c * out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod out.NetworkPluginName = in.NetworkPluginName out.NetworkPluginDir = in.NetworkPluginDir + out.CNIConfDir = in.CNIConfDir + out.CNIBinDir = in.CNIBinDir out.NetworkPluginMTU = in.NetworkPluginMTU out.VolumePluginDir = in.VolumePluginDir out.CloudProvider = in.CloudProvider diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/zz_generated.deepcopy.go index aeb78ba8c963..399c00b5e72b 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/componentconfig/zz_generated.deepcopy.go @@ -259,6 +259,8 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface out.NetworkPluginName = in.NetworkPluginName out.NetworkPluginMTU = in.NetworkPluginMTU out.NetworkPluginDir = in.NetworkPluginDir + out.CNIConfDir = in.CNIConfDir + out.CNIBinDir = in.CNIBinDir out.VolumePluginDir = in.VolumePluginDir out.CloudProvider = in.CloudProvider out.CloudConfigFile = in.CloudConfigFile diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/register.go b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/register.go index 849279982c2f..37180c4e808f 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/register.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/register.go @@ -76,8 +76,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &PodSecurityPolicyList{}, &NetworkPolicy{}, &NetworkPolicyList{}, - &StorageClass{}, - &StorageClassList{}, ) return nil } diff 
--git a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/types.generated.go b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/types.generated.go index aa306f2cc6ba..681a3149f84c 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/types.generated.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/types.generated.go @@ -15566,727 +15566,15 @@ func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1292 := z.EncBinary() - _ = yym1292 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1293 := !z.EncBinary() - yy2arr1293 := z.EncBasicHandle().StructToArray - var yyq1293 [5]bool - _, _, _ = yysep1293, yyq1293, yy2arr1293 - const yyr1293 bool = false - yyq1293[0] = x.Kind != "" - yyq1293[1] = x.APIVersion != "" - yyq1293[2] = true - yyq1293[4] = len(x.Parameters) != 0 - var yynn1293 int - if yyr1293 || yy2arr1293 { - r.EncodeArrayStart(5) - } else { - yynn1293 = 1 - for _, b := range yyq1293 { - if b { - yynn1293++ - } - } - r.EncodeMapStart(yynn1293) - yynn1293 = 0 - } - if yyr1293 || yy2arr1293 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1293[0] { - yym1295 := z.EncBinary() - _ = yym1295 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1293[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1296 := z.EncBinary() - _ = yym1296 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr1293 || yy2arr1293 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1293[1] { - yym1298 := z.EncBinary() - _ = yym1298 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1293[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1299 := z.EncBinary() - _ = yym1299 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr1293 || yy2arr1293 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1293[2] { - yy1301 := &x.ObjectMeta - yy1301.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq1293[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1302 := &x.ObjectMeta - yy1302.CodecEncodeSelf(e) - } - } - if yyr1293 || yy2arr1293 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1304 := z.EncBinary() - _ = yym1304 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("provisioner")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1305 := z.EncBinary() - _ = yym1305 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) - } - } - if yyr1293 || yy2arr1293 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1293[4] { - if x.Parameters == nil { - r.EncodeNil() - } else { - yym1307 := z.EncBinary() - _ = yym1307 - if false { - } else { - z.F.EncMapStringStringV(x.Parameters, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1293[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("parameters")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Parameters == nil { - r.EncodeNil() - } else { - yym1308 := z.EncBinary() - _ = yym1308 - if false { - } else { - z.F.EncMapStringStringV(x.Parameters, false, e) - } - } - } - } - if yyr1293 || yy2arr1293 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StorageClass) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1309 := z.DecBinary() - _ = yym1309 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1310 := r.ContainerType() - if yyct1310 == codecSelferValueTypeMap1234 { - yyl1310 := r.ReadMapStart() - if yyl1310 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1310, d) - } - } else if yyct1310 == codecSelferValueTypeArray1234 { - yyl1310 := r.ReadArrayStart() - if yyl1310 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1310, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StorageClass) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1311Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1311Slc - var yyhl1311 bool = l >= 0 - for yyj1311 := 0; ; yyj1311++ { - if yyhl1311 { - if yyj1311 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1311Slc = r.DecodeBytes(yys1311Slc, true, true) - yys1311 := string(yys1311Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1311 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv1314 := &x.ObjectMeta - yyv1314.CodecDecodeSelf(d) - } - case "provisioner": - if r.TryDecodeAsNil() { - x.Provisioner = "" - } else { - x.Provisioner = string(r.DecodeString()) - } - case "parameters": - if r.TryDecodeAsNil() { - x.Parameters = nil - } else { - yyv1316 := &x.Parameters - yym1317 := z.DecBinary() - _ = yym1317 - if false { - } else { - z.F.DecMapStringStringX(yyv1316, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1311) - } // end switch yys1311 - } // end for yyj1311 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1318 int - var yyb1318 bool - var yyhl1318 bool = l >= 0 - yyj1318++ - if 
yyhl1318 { - yyb1318 = yyj1318 > l - } else { - yyb1318 = r.CheckBreak() - } - if yyb1318 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj1318++ - if yyhl1318 { - yyb1318 = yyj1318 > l - } else { - yyb1318 = r.CheckBreak() - } - if yyb1318 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj1318++ - if yyhl1318 { - yyb1318 = yyj1318 > l - } else { - yyb1318 = r.CheckBreak() - } - if yyb1318 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_api.ObjectMeta{} - } else { - yyv1321 := &x.ObjectMeta - yyv1321.CodecDecodeSelf(d) - } - yyj1318++ - if yyhl1318 { - yyb1318 = yyj1318 > l - } else { - yyb1318 = r.CheckBreak() - } - if yyb1318 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Provisioner = "" - } else { - x.Provisioner = string(r.DecodeString()) - } - yyj1318++ - if yyhl1318 { - yyb1318 = yyj1318 > l - } else { - yyb1318 = r.CheckBreak() - } - if yyb1318 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Parameters = nil - } else { - yyv1323 := &x.Parameters - yym1324 := z.DecBinary() - _ = yym1324 - if false { - } else { - z.F.DecMapStringStringX(yyv1323, false, d) - } - } - for { - yyj1318++ - if yyhl1318 { - yyb1318 = yyj1318 > l - } else { - yyb1318 = r.CheckBreak() - } - if yyb1318 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1318-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1325 := z.EncBinary() - _ = yym1325 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1326 := !z.EncBinary() - yy2arr1326 := z.EncBasicHandle().StructToArray - var yyq1326 [4]bool - _, _, _ = yysep1326, yyq1326, yy2arr1326 - const yyr1326 bool = false - yyq1326[0] = x.Kind != "" - yyq1326[1] = x.APIVersion != "" - yyq1326[2] = true - var yynn1326 int - if yyr1326 || yy2arr1326 { - r.EncodeArrayStart(4) - } else { - yynn1326 = 1 - for _, b := range yyq1326 { - if b { - yynn1326++ - } - } - r.EncodeMapStart(yynn1326) - yynn1326 = 0 - } - if yyr1326 || yy2arr1326 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1326[0] { - yym1328 := z.EncBinary() - _ = yym1328 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1326[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1329 := z.EncBinary() - _ = yym1329 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if 
yyr1326 || yy2arr1326 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1326[1] { - yym1331 := z.EncBinary() - _ = yym1331 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1326[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1332 := z.EncBinary() - _ = yym1332 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr1326 || yy2arr1326 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1326[2] { - yy1334 := &x.ListMeta - yym1335 := z.EncBinary() - _ = yym1335 - if false { - } else if z.HasExtensions() && z.EncExt(yy1334) { - } else { - z.EncFallback(yy1334) - } - } else { - r.EncodeNil() - } - } else { - if yyq1326[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1336 := &x.ListMeta - yym1337 := z.EncBinary() - _ = yym1337 - if false { - } else if z.HasExtensions() && z.EncExt(yy1336) { - } else { - z.EncFallback(yy1336) - } - } - } - if yyr1326 || yy2arr1326 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym1339 := z.EncBinary() - _ = yym1339 - if false { - } else { - h.encSliceStorageClass(([]StorageClass)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym1340 := z.EncBinary() - _ = yym1340 - if false { - } else { - h.encSliceStorageClass(([]StorageClass)(x.Items), e) - } - } - } - if yyr1326 || yy2arr1326 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StorageClassList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1341 := z.DecBinary() - _ = yym1341 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1342 := r.ContainerType() - if yyct1342 == codecSelferValueTypeMap1234 { - yyl1342 := r.ReadMapStart() - if yyl1342 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1342, d) - } - } else if yyct1342 == codecSelferValueTypeArray1234 { - yyl1342 := r.ReadArrayStart() - if yyl1342 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1342, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StorageClassList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1343Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1343Slc - var yyhl1343 bool = l >= 0 - for yyj1343 := 0; ; yyj1343++ { - if yyhl1343 { - if yyj1343 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1343Slc = r.DecodeBytes(yys1343Slc, true, true) - yys1343 := string(yys1343Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1343 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv1346 := &x.ListMeta - yym1347 := z.DecBinary() - _ = yym1347 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1346) { - } else { - z.DecFallback(yyv1346, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv1348 := &x.Items - yym1349 := z.DecBinary() - _ = yym1349 - if false { - } else { - h.decSliceStorageClass((*[]StorageClass)(yyv1348), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1343) - } // end switch yys1343 - } // end for yyj1343 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1350 int - var yyb1350 bool - var yyhl1350 bool = l >= 0 - yyj1350++ - if yyhl1350 { - yyb1350 = yyj1350 > l - } else { - yyb1350 = r.CheckBreak() - } - if yyb1350 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj1350++ - if yyhl1350 { - yyb1350 = yyj1350 > l - } else { - yyb1350 = r.CheckBreak() - } - if yyb1350 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj1350++ - if yyhl1350 { - yyb1350 = yyj1350 > l - } else { - yyb1350 = r.CheckBreak() - } - if yyb1350 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv1353 := &x.ListMeta - yym1354 := z.DecBinary() - _ = yym1354 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1353) { - } else { - z.DecFallback(yyv1353, false) - } - } - yyj1350++ - if yyhl1350 { - yyb1350 = yyj1350 > l - } else { - yyb1350 = r.CheckBreak() - } - if yyb1350 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv1355 := &x.Items - yym1356 := z.DecBinary() - _ = yym1356 - if false { - } else { - h.decSliceStorageClass((*[]StorageClass)(yyv1355), d) - } - } - for { - yyj1350++ - if yyhl1350 { - yyb1350 = yyj1350 > l - } else { - yyb1350 = r.CheckBreak() - } - if yyb1350 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1350-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1357 := range v { + for _, yyv1292 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1358 := &yyv1357 - yy1358.CodecEncodeSelf(e) + yy1293 := &yyv1292 + 
yy1293.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -16296,83 +15584,83 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1359 := *v - yyh1359, yyl1359 := z.DecSliceHelperStart() - var yyc1359 bool - if yyl1359 == 0 { - if yyv1359 == nil { - yyv1359 = []CustomMetricTarget{} - yyc1359 = true - } else if len(yyv1359) != 0 { - yyv1359 = yyv1359[:0] - yyc1359 = true + yyv1294 := *v + yyh1294, yyl1294 := z.DecSliceHelperStart() + var yyc1294 bool + if yyl1294 == 0 { + if yyv1294 == nil { + yyv1294 = []CustomMetricTarget{} + yyc1294 = true + } else if len(yyv1294) != 0 { + yyv1294 = yyv1294[:0] + yyc1294 = true } - } else if yyl1359 > 0 { - var yyrr1359, yyrl1359 int - var yyrt1359 bool - if yyl1359 > cap(yyv1359) { + } else if yyl1294 > 0 { + var yyrr1294, yyrl1294 int + var yyrt1294 bool + if yyl1294 > cap(yyv1294) { - yyrg1359 := len(yyv1359) > 0 - yyv21359 := yyv1359 - yyrl1359, yyrt1359 = z.DecInferLen(yyl1359, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1359 { - if yyrl1359 <= cap(yyv1359) { - yyv1359 = yyv1359[:yyrl1359] + yyrg1294 := len(yyv1294) > 0 + yyv21294 := yyv1294 + yyrl1294, yyrt1294 = z.DecInferLen(yyl1294, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1294 { + if yyrl1294 <= cap(yyv1294) { + yyv1294 = yyv1294[:yyrl1294] } else { - yyv1359 = make([]CustomMetricTarget, yyrl1359) + yyv1294 = make([]CustomMetricTarget, yyrl1294) } } else { - yyv1359 = make([]CustomMetricTarget, yyrl1359) + yyv1294 = make([]CustomMetricTarget, yyrl1294) } - yyc1359 = true - yyrr1359 = len(yyv1359) - if yyrg1359 { - copy(yyv1359, yyv21359) + yyc1294 = true + yyrr1294 = len(yyv1294) + if yyrg1294 { + copy(yyv1294, yyv21294) } - } else if yyl1359 != len(yyv1359) { - yyv1359 = yyv1359[:yyl1359] - yyc1359 = true + } else if yyl1294 != len(yyv1294) { + yyv1294 = yyv1294[:yyl1294] + yyc1294 = true } - yyj1359 := 0 - for ; yyj1359 < yyrr1359; yyj1359++ { - yyh1359.ElemContainerState(yyj1359) + yyj1294 := 0 + for ; yyj1294 < yyrr1294; yyj1294++ { + yyh1294.ElemContainerState(yyj1294) if r.TryDecodeAsNil() { - yyv1359[yyj1359] = CustomMetricTarget{} + yyv1294[yyj1294] = CustomMetricTarget{} } else { - yyv1360 := &yyv1359[yyj1359] - yyv1360.CodecDecodeSelf(d) + yyv1295 := &yyv1294[yyj1294] + yyv1295.CodecDecodeSelf(d) } } - if yyrt1359 { - for ; yyj1359 < yyl1359; yyj1359++ { - yyv1359 = append(yyv1359, CustomMetricTarget{}) - yyh1359.ElemContainerState(yyj1359) + if yyrt1294 { + for ; yyj1294 < yyl1294; yyj1294++ { + yyv1294 = append(yyv1294, CustomMetricTarget{}) + yyh1294.ElemContainerState(yyj1294) if r.TryDecodeAsNil() { - yyv1359[yyj1359] = CustomMetricTarget{} + yyv1294[yyj1294] = CustomMetricTarget{} } else { - yyv1361 := &yyv1359[yyj1359] - yyv1361.CodecDecodeSelf(d) + yyv1296 := &yyv1294[yyj1294] + yyv1296.CodecDecodeSelf(d) } } } } else { - yyj1359 := 0 - for ; !r.CheckBreak(); yyj1359++ { + yyj1294 := 0 + for ; !r.CheckBreak(); yyj1294++ { - if yyj1359 >= len(yyv1359) { - yyv1359 = append(yyv1359, CustomMetricTarget{}) // var yyz1359 CustomMetricTarget - yyc1359 = true + if yyj1294 >= len(yyv1294) { + yyv1294 = append(yyv1294, CustomMetricTarget{}) // var yyz1294 CustomMetricTarget + yyc1294 = true } - yyh1359.ElemContainerState(yyj1359) - if yyj1359 < len(yyv1359) { + yyh1294.ElemContainerState(yyj1294) + if yyj1294 < len(yyv1294) { if r.TryDecodeAsNil() { - yyv1359[yyj1359] = CustomMetricTarget{} + yyv1294[yyj1294] = CustomMetricTarget{} } else { - yyv1362 := 
&yyv1359[yyj1359] - yyv1362.CodecDecodeSelf(d) + yyv1297 := &yyv1294[yyj1294] + yyv1297.CodecDecodeSelf(d) } } else { @@ -16380,17 +15668,17 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * } } - if yyj1359 < len(yyv1359) { - yyv1359 = yyv1359[:yyj1359] - yyc1359 = true - } else if yyj1359 == 0 && yyv1359 == nil { - yyv1359 = []CustomMetricTarget{} - yyc1359 = true + if yyj1294 < len(yyv1294) { + yyv1294 = yyv1294[:yyj1294] + yyc1294 = true + } else if yyj1294 == 0 && yyv1294 == nil { + yyv1294 = []CustomMetricTarget{} + yyc1294 = true } } - yyh1359.End() - if yyc1359 { - *v = yyv1359 + yyh1294.End() + if yyc1294 { + *v = yyv1294 } } @@ -16399,10 +15687,10 @@ func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurre z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1363 := range v { + for _, yyv1298 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1364 := &yyv1363 - yy1364.CodecEncodeSelf(e) + yy1299 := &yyv1298 + yy1299.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -16412,83 +15700,83 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1365 := *v - yyh1365, yyl1365 := z.DecSliceHelperStart() - var yyc1365 bool - if yyl1365 == 0 { - if yyv1365 == nil { - yyv1365 = []CustomMetricCurrentStatus{} - yyc1365 = true - } else if len(yyv1365) != 0 { - yyv1365 = yyv1365[:0] - yyc1365 = true + yyv1300 := *v + yyh1300, yyl1300 := z.DecSliceHelperStart() + var yyc1300 bool + if yyl1300 == 0 { + if yyv1300 == nil { + yyv1300 = []CustomMetricCurrentStatus{} + yyc1300 = true + } else if len(yyv1300) != 0 { + yyv1300 = yyv1300[:0] + yyc1300 = true } - } else if yyl1365 > 0 { - var yyrr1365, yyrl1365 int - var yyrt1365 bool - if yyl1365 > cap(yyv1365) { + } else if yyl1300 > 0 { + var yyrr1300, yyrl1300 int + var yyrt1300 bool + if yyl1300 > cap(yyv1300) { - yyrg1365 := len(yyv1365) > 0 - yyv21365 := yyv1365 - yyrl1365, yyrt1365 = z.DecInferLen(yyl1365, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1365 { - if yyrl1365 <= cap(yyv1365) { - yyv1365 = yyv1365[:yyrl1365] + yyrg1300 := len(yyv1300) > 0 + yyv21300 := yyv1300 + yyrl1300, yyrt1300 = z.DecInferLen(yyl1300, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1300 { + if yyrl1300 <= cap(yyv1300) { + yyv1300 = yyv1300[:yyrl1300] } else { - yyv1365 = make([]CustomMetricCurrentStatus, yyrl1365) + yyv1300 = make([]CustomMetricCurrentStatus, yyrl1300) } } else { - yyv1365 = make([]CustomMetricCurrentStatus, yyrl1365) + yyv1300 = make([]CustomMetricCurrentStatus, yyrl1300) } - yyc1365 = true - yyrr1365 = len(yyv1365) - if yyrg1365 { - copy(yyv1365, yyv21365) + yyc1300 = true + yyrr1300 = len(yyv1300) + if yyrg1300 { + copy(yyv1300, yyv21300) } - } else if yyl1365 != len(yyv1365) { - yyv1365 = yyv1365[:yyl1365] - yyc1365 = true + } else if yyl1300 != len(yyv1300) { + yyv1300 = yyv1300[:yyl1300] + yyc1300 = true } - yyj1365 := 0 - for ; yyj1365 < yyrr1365; yyj1365++ { - yyh1365.ElemContainerState(yyj1365) + yyj1300 := 0 + for ; yyj1300 < yyrr1300; yyj1300++ { + yyh1300.ElemContainerState(yyj1300) if r.TryDecodeAsNil() { - yyv1365[yyj1365] = CustomMetricCurrentStatus{} + yyv1300[yyj1300] = CustomMetricCurrentStatus{} } else { - yyv1366 := &yyv1365[yyj1365] - yyv1366.CodecDecodeSelf(d) + yyv1301 := &yyv1300[yyj1300] + yyv1301.CodecDecodeSelf(d) } } - if yyrt1365 { - for ; yyj1365 < yyl1365; yyj1365++ { - yyv1365 
= append(yyv1365, CustomMetricCurrentStatus{}) - yyh1365.ElemContainerState(yyj1365) + if yyrt1300 { + for ; yyj1300 < yyl1300; yyj1300++ { + yyv1300 = append(yyv1300, CustomMetricCurrentStatus{}) + yyh1300.ElemContainerState(yyj1300) if r.TryDecodeAsNil() { - yyv1365[yyj1365] = CustomMetricCurrentStatus{} + yyv1300[yyj1300] = CustomMetricCurrentStatus{} } else { - yyv1367 := &yyv1365[yyj1365] - yyv1367.CodecDecodeSelf(d) + yyv1302 := &yyv1300[yyj1300] + yyv1302.CodecDecodeSelf(d) } } } } else { - yyj1365 := 0 - for ; !r.CheckBreak(); yyj1365++ { + yyj1300 := 0 + for ; !r.CheckBreak(); yyj1300++ { - if yyj1365 >= len(yyv1365) { - yyv1365 = append(yyv1365, CustomMetricCurrentStatus{}) // var yyz1365 CustomMetricCurrentStatus - yyc1365 = true + if yyj1300 >= len(yyv1300) { + yyv1300 = append(yyv1300, CustomMetricCurrentStatus{}) // var yyz1300 CustomMetricCurrentStatus + yyc1300 = true } - yyh1365.ElemContainerState(yyj1365) - if yyj1365 < len(yyv1365) { + yyh1300.ElemContainerState(yyj1300) + if yyj1300 < len(yyv1300) { if r.TryDecodeAsNil() { - yyv1365[yyj1365] = CustomMetricCurrentStatus{} + yyv1300[yyj1300] = CustomMetricCurrentStatus{} } else { - yyv1368 := &yyv1365[yyj1365] - yyv1368.CodecDecodeSelf(d) + yyv1303 := &yyv1300[yyj1300] + yyv1303.CodecDecodeSelf(d) } } else { @@ -16496,17 +15784,17 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr } } - if yyj1365 < len(yyv1365) { - yyv1365 = yyv1365[:yyj1365] - yyc1365 = true - } else if yyj1365 == 0 && yyv1365 == nil { - yyv1365 = []CustomMetricCurrentStatus{} - yyc1365 = true + if yyj1300 < len(yyv1300) { + yyv1300 = yyv1300[:yyj1300] + yyc1300 = true + } else if yyj1300 == 0 && yyv1300 == nil { + yyv1300 = []CustomMetricCurrentStatus{} + yyc1300 = true } } - yyh1365.End() - if yyc1365 { - *v = yyv1365 + yyh1300.End() + if yyc1300 { + *v = yyv1300 } } @@ -16515,10 +15803,10 @@ func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1369 := range v { + for _, yyv1304 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1370 := &yyv1369 - yy1370.CodecEncodeSelf(e) + yy1305 := &yyv1304 + yy1305.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -16528,83 +15816,83 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1371 := *v - yyh1371, yyl1371 := z.DecSliceHelperStart() - var yyc1371 bool - if yyl1371 == 0 { - if yyv1371 == nil { - yyv1371 = []APIVersion{} - yyc1371 = true - } else if len(yyv1371) != 0 { - yyv1371 = yyv1371[:0] - yyc1371 = true + yyv1306 := *v + yyh1306, yyl1306 := z.DecSliceHelperStart() + var yyc1306 bool + if yyl1306 == 0 { + if yyv1306 == nil { + yyv1306 = []APIVersion{} + yyc1306 = true + } else if len(yyv1306) != 0 { + yyv1306 = yyv1306[:0] + yyc1306 = true } - } else if yyl1371 > 0 { - var yyrr1371, yyrl1371 int - var yyrt1371 bool - if yyl1371 > cap(yyv1371) { + } else if yyl1306 > 0 { + var yyrr1306, yyrl1306 int + var yyrt1306 bool + if yyl1306 > cap(yyv1306) { - yyrg1371 := len(yyv1371) > 0 - yyv21371 := yyv1371 - yyrl1371, yyrt1371 = z.DecInferLen(yyl1371, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1371 { - if yyrl1371 <= cap(yyv1371) { - yyv1371 = yyv1371[:yyrl1371] + yyrg1306 := len(yyv1306) > 0 + yyv21306 := yyv1306 + yyrl1306, yyrt1306 = z.DecInferLen(yyl1306, z.DecBasicHandle().MaxInitLen, 16) + if 
yyrt1306 { + if yyrl1306 <= cap(yyv1306) { + yyv1306 = yyv1306[:yyrl1306] } else { - yyv1371 = make([]APIVersion, yyrl1371) + yyv1306 = make([]APIVersion, yyrl1306) } } else { - yyv1371 = make([]APIVersion, yyrl1371) + yyv1306 = make([]APIVersion, yyrl1306) } - yyc1371 = true - yyrr1371 = len(yyv1371) - if yyrg1371 { - copy(yyv1371, yyv21371) + yyc1306 = true + yyrr1306 = len(yyv1306) + if yyrg1306 { + copy(yyv1306, yyv21306) } - } else if yyl1371 != len(yyv1371) { - yyv1371 = yyv1371[:yyl1371] - yyc1371 = true + } else if yyl1306 != len(yyv1306) { + yyv1306 = yyv1306[:yyl1306] + yyc1306 = true } - yyj1371 := 0 - for ; yyj1371 < yyrr1371; yyj1371++ { - yyh1371.ElemContainerState(yyj1371) + yyj1306 := 0 + for ; yyj1306 < yyrr1306; yyj1306++ { + yyh1306.ElemContainerState(yyj1306) if r.TryDecodeAsNil() { - yyv1371[yyj1371] = APIVersion{} + yyv1306[yyj1306] = APIVersion{} } else { - yyv1372 := &yyv1371[yyj1371] - yyv1372.CodecDecodeSelf(d) + yyv1307 := &yyv1306[yyj1306] + yyv1307.CodecDecodeSelf(d) } } - if yyrt1371 { - for ; yyj1371 < yyl1371; yyj1371++ { - yyv1371 = append(yyv1371, APIVersion{}) - yyh1371.ElemContainerState(yyj1371) + if yyrt1306 { + for ; yyj1306 < yyl1306; yyj1306++ { + yyv1306 = append(yyv1306, APIVersion{}) + yyh1306.ElemContainerState(yyj1306) if r.TryDecodeAsNil() { - yyv1371[yyj1371] = APIVersion{} + yyv1306[yyj1306] = APIVersion{} } else { - yyv1373 := &yyv1371[yyj1371] - yyv1373.CodecDecodeSelf(d) + yyv1308 := &yyv1306[yyj1306] + yyv1308.CodecDecodeSelf(d) } } } } else { - yyj1371 := 0 - for ; !r.CheckBreak(); yyj1371++ { + yyj1306 := 0 + for ; !r.CheckBreak(); yyj1306++ { - if yyj1371 >= len(yyv1371) { - yyv1371 = append(yyv1371, APIVersion{}) // var yyz1371 APIVersion - yyc1371 = true + if yyj1306 >= len(yyv1306) { + yyv1306 = append(yyv1306, APIVersion{}) // var yyz1306 APIVersion + yyc1306 = true } - yyh1371.ElemContainerState(yyj1371) - if yyj1371 < len(yyv1371) { + yyh1306.ElemContainerState(yyj1306) + if yyj1306 < len(yyv1306) { if r.TryDecodeAsNil() { - yyv1371[yyj1371] = APIVersion{} + yyv1306[yyj1306] = APIVersion{} } else { - yyv1374 := &yyv1371[yyj1371] - yyv1374.CodecDecodeSelf(d) + yyv1309 := &yyv1306[yyj1306] + yyv1309.CodecDecodeSelf(d) } } else { @@ -16612,17 +15900,17 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode } } - if yyj1371 < len(yyv1371) { - yyv1371 = yyv1371[:yyj1371] - yyc1371 = true - } else if yyj1371 == 0 && yyv1371 == nil { - yyv1371 = []APIVersion{} - yyc1371 = true + if yyj1306 < len(yyv1306) { + yyv1306 = yyv1306[:yyj1306] + yyc1306 = true + } else if yyj1306 == 0 && yyv1306 == nil { + yyv1306 = []APIVersion{} + yyc1306 = true } } - yyh1371.End() - if yyc1371 { - *v = yyv1371 + yyh1306.End() + if yyc1306 { + *v = yyv1306 } } @@ -16631,10 +15919,10 @@ func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *c z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1375 := range v { + for _, yyv1310 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1376 := &yyv1375 - yy1376.CodecEncodeSelf(e) + yy1311 := &yyv1310 + yy1311.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -16644,83 +15932,83 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1377 := *v - yyh1377, yyl1377 := z.DecSliceHelperStart() - var yyc1377 bool - if yyl1377 == 0 { - if yyv1377 == nil { - yyv1377 = 
[]ThirdPartyResource{} - yyc1377 = true - } else if len(yyv1377) != 0 { - yyv1377 = yyv1377[:0] - yyc1377 = true + yyv1312 := *v + yyh1312, yyl1312 := z.DecSliceHelperStart() + var yyc1312 bool + if yyl1312 == 0 { + if yyv1312 == nil { + yyv1312 = []ThirdPartyResource{} + yyc1312 = true + } else if len(yyv1312) != 0 { + yyv1312 = yyv1312[:0] + yyc1312 = true } - } else if yyl1377 > 0 { - var yyrr1377, yyrl1377 int - var yyrt1377 bool - if yyl1377 > cap(yyv1377) { + } else if yyl1312 > 0 { + var yyrr1312, yyrl1312 int + var yyrt1312 bool + if yyl1312 > cap(yyv1312) { - yyrg1377 := len(yyv1377) > 0 - yyv21377 := yyv1377 - yyrl1377, yyrt1377 = z.DecInferLen(yyl1377, z.DecBasicHandle().MaxInitLen, 296) - if yyrt1377 { - if yyrl1377 <= cap(yyv1377) { - yyv1377 = yyv1377[:yyrl1377] + yyrg1312 := len(yyv1312) > 0 + yyv21312 := yyv1312 + yyrl1312, yyrt1312 = z.DecInferLen(yyl1312, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1312 { + if yyrl1312 <= cap(yyv1312) { + yyv1312 = yyv1312[:yyrl1312] } else { - yyv1377 = make([]ThirdPartyResource, yyrl1377) + yyv1312 = make([]ThirdPartyResource, yyrl1312) } } else { - yyv1377 = make([]ThirdPartyResource, yyrl1377) + yyv1312 = make([]ThirdPartyResource, yyrl1312) } - yyc1377 = true - yyrr1377 = len(yyv1377) - if yyrg1377 { - copy(yyv1377, yyv21377) + yyc1312 = true + yyrr1312 = len(yyv1312) + if yyrg1312 { + copy(yyv1312, yyv21312) } - } else if yyl1377 != len(yyv1377) { - yyv1377 = yyv1377[:yyl1377] - yyc1377 = true + } else if yyl1312 != len(yyv1312) { + yyv1312 = yyv1312[:yyl1312] + yyc1312 = true } - yyj1377 := 0 - for ; yyj1377 < yyrr1377; yyj1377++ { - yyh1377.ElemContainerState(yyj1377) + yyj1312 := 0 + for ; yyj1312 < yyrr1312; yyj1312++ { + yyh1312.ElemContainerState(yyj1312) if r.TryDecodeAsNil() { - yyv1377[yyj1377] = ThirdPartyResource{} + yyv1312[yyj1312] = ThirdPartyResource{} } else { - yyv1378 := &yyv1377[yyj1377] - yyv1378.CodecDecodeSelf(d) + yyv1313 := &yyv1312[yyj1312] + yyv1313.CodecDecodeSelf(d) } } - if yyrt1377 { - for ; yyj1377 < yyl1377; yyj1377++ { - yyv1377 = append(yyv1377, ThirdPartyResource{}) - yyh1377.ElemContainerState(yyj1377) + if yyrt1312 { + for ; yyj1312 < yyl1312; yyj1312++ { + yyv1312 = append(yyv1312, ThirdPartyResource{}) + yyh1312.ElemContainerState(yyj1312) if r.TryDecodeAsNil() { - yyv1377[yyj1377] = ThirdPartyResource{} + yyv1312[yyj1312] = ThirdPartyResource{} } else { - yyv1379 := &yyv1377[yyj1377] - yyv1379.CodecDecodeSelf(d) + yyv1314 := &yyv1312[yyj1312] + yyv1314.CodecDecodeSelf(d) } } } } else { - yyj1377 := 0 - for ; !r.CheckBreak(); yyj1377++ { + yyj1312 := 0 + for ; !r.CheckBreak(); yyj1312++ { - if yyj1377 >= len(yyv1377) { - yyv1377 = append(yyv1377, ThirdPartyResource{}) // var yyz1377 ThirdPartyResource - yyc1377 = true + if yyj1312 >= len(yyv1312) { + yyv1312 = append(yyv1312, ThirdPartyResource{}) // var yyz1312 ThirdPartyResource + yyc1312 = true } - yyh1377.ElemContainerState(yyj1377) - if yyj1377 < len(yyv1377) { + yyh1312.ElemContainerState(yyj1312) + if yyj1312 < len(yyv1312) { if r.TryDecodeAsNil() { - yyv1377[yyj1377] = ThirdPartyResource{} + yyv1312[yyj1312] = ThirdPartyResource{} } else { - yyv1380 := &yyv1377[yyj1377] - yyv1380.CodecDecodeSelf(d) + yyv1315 := &yyv1312[yyj1312] + yyv1315.CodecDecodeSelf(d) } } else { @@ -16728,17 +16016,17 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * } } - if yyj1377 < len(yyv1377) { - yyv1377 = yyv1377[:yyj1377] - yyc1377 = true - } else if yyj1377 == 0 && yyv1377 == nil { - yyv1377 = []ThirdPartyResource{} - 
yyc1377 = true + if yyj1312 < len(yyv1312) { + yyv1312 = yyv1312[:yyj1312] + yyc1312 = true + } else if yyj1312 == 0 && yyv1312 == nil { + yyv1312 = []ThirdPartyResource{} + yyc1312 = true } } - yyh1377.End() - if yyc1377 { - *v = yyv1377 + yyh1312.End() + if yyc1312 { + *v = yyv1312 } } @@ -16747,212 +16035,96 @@ func (x codecSelfer1234) encSliceDeployment(v []Deployment, e *codec1978.Encoder z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1381 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1382 := &yyv1381 - yy1382.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1383 := *v - yyh1383, yyl1383 := z.DecSliceHelperStart() - var yyc1383 bool - if yyl1383 == 0 { - if yyv1383 == nil { - yyv1383 = []Deployment{} - yyc1383 = true - } else if len(yyv1383) != 0 { - yyv1383 = yyv1383[:0] - yyc1383 = true - } - } else if yyl1383 > 0 { - var yyrr1383, yyrl1383 int - var yyrt1383 bool - if yyl1383 > cap(yyv1383) { - - yyrg1383 := len(yyv1383) > 0 - yyv21383 := yyv1383 - yyrl1383, yyrt1383 = z.DecInferLen(yyl1383, z.DecBasicHandle().MaxInitLen, 800) - if yyrt1383 { - if yyrl1383 <= cap(yyv1383) { - yyv1383 = yyv1383[:yyrl1383] - } else { - yyv1383 = make([]Deployment, yyrl1383) - } - } else { - yyv1383 = make([]Deployment, yyrl1383) - } - yyc1383 = true - yyrr1383 = len(yyv1383) - if yyrg1383 { - copy(yyv1383, yyv21383) - } - } else if yyl1383 != len(yyv1383) { - yyv1383 = yyv1383[:yyl1383] - yyc1383 = true - } - yyj1383 := 0 - for ; yyj1383 < yyrr1383; yyj1383++ { - yyh1383.ElemContainerState(yyj1383) - if r.TryDecodeAsNil() { - yyv1383[yyj1383] = Deployment{} - } else { - yyv1384 := &yyv1383[yyj1383] - yyv1384.CodecDecodeSelf(d) - } - - } - if yyrt1383 { - for ; yyj1383 < yyl1383; yyj1383++ { - yyv1383 = append(yyv1383, Deployment{}) - yyh1383.ElemContainerState(yyj1383) - if r.TryDecodeAsNil() { - yyv1383[yyj1383] = Deployment{} - } else { - yyv1385 := &yyv1383[yyj1383] - yyv1385.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1383 := 0 - for ; !r.CheckBreak(); yyj1383++ { - - if yyj1383 >= len(yyv1383) { - yyv1383 = append(yyv1383, Deployment{}) // var yyz1383 Deployment - yyc1383 = true - } - yyh1383.ElemContainerState(yyj1383) - if yyj1383 < len(yyv1383) { - if r.TryDecodeAsNil() { - yyv1383[yyj1383] = Deployment{} - } else { - yyv1386 := &yyv1383[yyj1383] - yyv1386.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1383 < len(yyv1383) { - yyv1383 = yyv1383[:yyj1383] - yyc1383 = true - } else if yyj1383 == 0 && yyv1383 == nil { - yyv1383 = []Deployment{} - yyc1383 = true - } - } - yyh1383.End() - if yyc1383 { - *v = yyv1383 - } -} - -func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1387 := range v { + for _, yyv1316 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1388 := &yyv1387 - yy1388.CodecEncodeSelf(e) + yy1317 := &yyv1316 + yy1317.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decoder) { var h 
codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1389 := *v - yyh1389, yyl1389 := z.DecSliceHelperStart() - var yyc1389 bool - if yyl1389 == 0 { - if yyv1389 == nil { - yyv1389 = []DaemonSet{} - yyc1389 = true - } else if len(yyv1389) != 0 { - yyv1389 = yyv1389[:0] - yyc1389 = true + yyv1318 := *v + yyh1318, yyl1318 := z.DecSliceHelperStart() + var yyc1318 bool + if yyl1318 == 0 { + if yyv1318 == nil { + yyv1318 = []Deployment{} + yyc1318 = true + } else if len(yyv1318) != 0 { + yyv1318 = yyv1318[:0] + yyc1318 = true } - } else if yyl1389 > 0 { - var yyrr1389, yyrl1389 int - var yyrt1389 bool - if yyl1389 > cap(yyv1389) { + } else if yyl1318 > 0 { + var yyrr1318, yyrl1318 int + var yyrt1318 bool + if yyl1318 > cap(yyv1318) { - yyrg1389 := len(yyv1389) > 0 - yyv21389 := yyv1389 - yyrl1389, yyrt1389 = z.DecInferLen(yyl1389, z.DecBasicHandle().MaxInitLen, 728) - if yyrt1389 { - if yyrl1389 <= cap(yyv1389) { - yyv1389 = yyv1389[:yyrl1389] + yyrg1318 := len(yyv1318) > 0 + yyv21318 := yyv1318 + yyrl1318, yyrt1318 = z.DecInferLen(yyl1318, z.DecBasicHandle().MaxInitLen, 800) + if yyrt1318 { + if yyrl1318 <= cap(yyv1318) { + yyv1318 = yyv1318[:yyrl1318] } else { - yyv1389 = make([]DaemonSet, yyrl1389) + yyv1318 = make([]Deployment, yyrl1318) } } else { - yyv1389 = make([]DaemonSet, yyrl1389) + yyv1318 = make([]Deployment, yyrl1318) } - yyc1389 = true - yyrr1389 = len(yyv1389) - if yyrg1389 { - copy(yyv1389, yyv21389) + yyc1318 = true + yyrr1318 = len(yyv1318) + if yyrg1318 { + copy(yyv1318, yyv21318) } - } else if yyl1389 != len(yyv1389) { - yyv1389 = yyv1389[:yyl1389] - yyc1389 = true + } else if yyl1318 != len(yyv1318) { + yyv1318 = yyv1318[:yyl1318] + yyc1318 = true } - yyj1389 := 0 - for ; yyj1389 < yyrr1389; yyj1389++ { - yyh1389.ElemContainerState(yyj1389) + yyj1318 := 0 + for ; yyj1318 < yyrr1318; yyj1318++ { + yyh1318.ElemContainerState(yyj1318) if r.TryDecodeAsNil() { - yyv1389[yyj1389] = DaemonSet{} + yyv1318[yyj1318] = Deployment{} } else { - yyv1390 := &yyv1389[yyj1389] - yyv1390.CodecDecodeSelf(d) + yyv1319 := &yyv1318[yyj1318] + yyv1319.CodecDecodeSelf(d) } } - if yyrt1389 { - for ; yyj1389 < yyl1389; yyj1389++ { - yyv1389 = append(yyv1389, DaemonSet{}) - yyh1389.ElemContainerState(yyj1389) + if yyrt1318 { + for ; yyj1318 < yyl1318; yyj1318++ { + yyv1318 = append(yyv1318, Deployment{}) + yyh1318.ElemContainerState(yyj1318) if r.TryDecodeAsNil() { - yyv1389[yyj1389] = DaemonSet{} + yyv1318[yyj1318] = Deployment{} } else { - yyv1391 := &yyv1389[yyj1389] - yyv1391.CodecDecodeSelf(d) + yyv1320 := &yyv1318[yyj1318] + yyv1320.CodecDecodeSelf(d) } } } } else { - yyj1389 := 0 - for ; !r.CheckBreak(); yyj1389++ { + yyj1318 := 0 + for ; !r.CheckBreak(); yyj1318++ { - if yyj1389 >= len(yyv1389) { - yyv1389 = append(yyv1389, DaemonSet{}) // var yyz1389 DaemonSet - yyc1389 = true + if yyj1318 >= len(yyv1318) { + yyv1318 = append(yyv1318, Deployment{}) // var yyz1318 Deployment + yyc1318 = true } - yyh1389.ElemContainerState(yyj1389) - if yyj1389 < len(yyv1389) { + yyh1318.ElemContainerState(yyj1318) + if yyj1318 < len(yyv1318) { if r.TryDecodeAsNil() { - yyv1389[yyj1389] = DaemonSet{} + yyv1318[yyj1318] = Deployment{} } else { - yyv1392 := &yyv1389[yyj1389] - yyv1392.CodecDecodeSelf(d) + yyv1321 := &yyv1318[yyj1318] + yyv1321.CodecDecodeSelf(d) } } else { @@ -16960,115 +16132,115 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) } } - if yyj1389 < len(yyv1389) { - yyv1389 = yyv1389[:yyj1389] - yyc1389 = true - } else if yyj1389 == 0 && 
yyv1389 == nil { - yyv1389 = []DaemonSet{} - yyc1389 = true + if yyj1318 < len(yyv1318) { + yyv1318 = yyv1318[:yyj1318] + yyc1318 = true + } else if yyj1318 == 0 && yyv1318 == nil { + yyv1318 = []Deployment{} + yyc1318 = true } } - yyh1389.End() - if yyc1389 { - *v = yyv1389 + yyh1318.End() + if yyc1318 { + *v = yyv1318 } } -func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1393 := range v { + for _, yyv1322 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1394 := &yyv1393 - yy1394.CodecEncodeSelf(e) + yy1323 := &yyv1322 + yy1323.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1395 := *v - yyh1395, yyl1395 := z.DecSliceHelperStart() - var yyc1395 bool - if yyl1395 == 0 { - if yyv1395 == nil { - yyv1395 = []ThirdPartyResourceData{} - yyc1395 = true - } else if len(yyv1395) != 0 { - yyv1395 = yyv1395[:0] - yyc1395 = true + yyv1324 := *v + yyh1324, yyl1324 := z.DecSliceHelperStart() + var yyc1324 bool + if yyl1324 == 0 { + if yyv1324 == nil { + yyv1324 = []DaemonSet{} + yyc1324 = true + } else if len(yyv1324) != 0 { + yyv1324 = yyv1324[:0] + yyc1324 = true } - } else if yyl1395 > 0 { - var yyrr1395, yyrl1395 int - var yyrt1395 bool - if yyl1395 > cap(yyv1395) { + } else if yyl1324 > 0 { + var yyrr1324, yyrl1324 int + var yyrt1324 bool + if yyl1324 > cap(yyv1324) { - yyrg1395 := len(yyv1395) > 0 - yyv21395 := yyv1395 - yyrl1395, yyrt1395 = z.DecInferLen(yyl1395, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1395 { - if yyrl1395 <= cap(yyv1395) { - yyv1395 = yyv1395[:yyrl1395] + yyrg1324 := len(yyv1324) > 0 + yyv21324 := yyv1324 + yyrl1324, yyrt1324 = z.DecInferLen(yyl1324, z.DecBasicHandle().MaxInitLen, 728) + if yyrt1324 { + if yyrl1324 <= cap(yyv1324) { + yyv1324 = yyv1324[:yyrl1324] } else { - yyv1395 = make([]ThirdPartyResourceData, yyrl1395) + yyv1324 = make([]DaemonSet, yyrl1324) } } else { - yyv1395 = make([]ThirdPartyResourceData, yyrl1395) + yyv1324 = make([]DaemonSet, yyrl1324) } - yyc1395 = true - yyrr1395 = len(yyv1395) - if yyrg1395 { - copy(yyv1395, yyv21395) + yyc1324 = true + yyrr1324 = len(yyv1324) + if yyrg1324 { + copy(yyv1324, yyv21324) } - } else if yyl1395 != len(yyv1395) { - yyv1395 = yyv1395[:yyl1395] - yyc1395 = true + } else if yyl1324 != len(yyv1324) { + yyv1324 = yyv1324[:yyl1324] + yyc1324 = true } - yyj1395 := 0 - for ; yyj1395 < yyrr1395; yyj1395++ { - yyh1395.ElemContainerState(yyj1395) + yyj1324 := 0 + for ; yyj1324 < yyrr1324; yyj1324++ { + yyh1324.ElemContainerState(yyj1324) if r.TryDecodeAsNil() { - yyv1395[yyj1395] = ThirdPartyResourceData{} + yyv1324[yyj1324] = DaemonSet{} } else { - yyv1396 := &yyv1395[yyj1395] - yyv1396.CodecDecodeSelf(d) + yyv1325 := &yyv1324[yyj1324] + yyv1325.CodecDecodeSelf(d) } } - if yyrt1395 { - for ; yyj1395 < yyl1395; yyj1395++ { - yyv1395 = append(yyv1395, ThirdPartyResourceData{}) - yyh1395.ElemContainerState(yyj1395) + if yyrt1324 { + for ; yyj1324 < yyl1324; yyj1324++ { + yyv1324 = append(yyv1324, DaemonSet{}) + 
yyh1324.ElemContainerState(yyj1324) if r.TryDecodeAsNil() { - yyv1395[yyj1395] = ThirdPartyResourceData{} + yyv1324[yyj1324] = DaemonSet{} } else { - yyv1397 := &yyv1395[yyj1395] - yyv1397.CodecDecodeSelf(d) + yyv1326 := &yyv1324[yyj1324] + yyv1326.CodecDecodeSelf(d) } } } } else { - yyj1395 := 0 - for ; !r.CheckBreak(); yyj1395++ { + yyj1324 := 0 + for ; !r.CheckBreak(); yyj1324++ { - if yyj1395 >= len(yyv1395) { - yyv1395 = append(yyv1395, ThirdPartyResourceData{}) // var yyz1395 ThirdPartyResourceData - yyc1395 = true + if yyj1324 >= len(yyv1324) { + yyv1324 = append(yyv1324, DaemonSet{}) // var yyz1324 DaemonSet + yyc1324 = true } - yyh1395.ElemContainerState(yyj1395) - if yyj1395 < len(yyv1395) { + yyh1324.ElemContainerState(yyj1324) + if yyj1324 < len(yyv1324) { if r.TryDecodeAsNil() { - yyv1395[yyj1395] = ThirdPartyResourceData{} + yyv1324[yyj1324] = DaemonSet{} } else { - yyv1398 := &yyv1395[yyj1395] - yyv1398.CodecDecodeSelf(d) + yyv1327 := &yyv1324[yyj1324] + yyv1327.CodecDecodeSelf(d) } } else { @@ -17076,115 +16248,115 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD } } - if yyj1395 < len(yyv1395) { - yyv1395 = yyv1395[:yyj1395] - yyc1395 = true - } else if yyj1395 == 0 && yyv1395 == nil { - yyv1395 = []ThirdPartyResourceData{} - yyc1395 = true + if yyj1324 < len(yyv1324) { + yyv1324 = yyv1324[:yyj1324] + yyc1324 = true + } else if yyj1324 == 0 && yyv1324 == nil { + yyv1324 = []DaemonSet{} + yyc1324 = true } } - yyh1395.End() - if yyc1395 { - *v = yyv1395 + yyh1324.End() + if yyc1324 { + *v = yyv1324 } } -func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceData, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1399 := range v { + for _, yyv1328 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1400 := &yyv1399 - yy1400.CodecEncodeSelf(e) + yy1329 := &yyv1328 + yy1329.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceData, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1401 := *v - yyh1401, yyl1401 := z.DecSliceHelperStart() - var yyc1401 bool - if yyl1401 == 0 { - if yyv1401 == nil { - yyv1401 = []Ingress{} - yyc1401 = true - } else if len(yyv1401) != 0 { - yyv1401 = yyv1401[:0] - yyc1401 = true + yyv1330 := *v + yyh1330, yyl1330 := z.DecSliceHelperStart() + var yyc1330 bool + if yyl1330 == 0 { + if yyv1330 == nil { + yyv1330 = []ThirdPartyResourceData{} + yyc1330 = true + } else if len(yyv1330) != 0 { + yyv1330 = yyv1330[:0] + yyc1330 = true } - } else if yyl1401 > 0 { - var yyrr1401, yyrl1401 int - var yyrt1401 bool - if yyl1401 > cap(yyv1401) { + } else if yyl1330 > 0 { + var yyrr1330, yyrl1330 int + var yyrt1330 bool + if yyl1330 > cap(yyv1330) { - yyrg1401 := len(yyv1401) > 0 - yyv21401 := yyv1401 - yyrl1401, yyrt1401 = z.DecInferLen(yyl1401, z.DecBasicHandle().MaxInitLen, 336) - if yyrt1401 { - if yyrl1401 <= cap(yyv1401) { - yyv1401 = yyv1401[:yyrl1401] + yyrg1330 := len(yyv1330) > 0 + yyv21330 := yyv1330 + yyrl1330, yyrt1330 = z.DecInferLen(yyl1330, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1330 { + if yyrl1330 <= cap(yyv1330) { + yyv1330 = yyv1330[:yyrl1330] } 
else { - yyv1401 = make([]Ingress, yyrl1401) + yyv1330 = make([]ThirdPartyResourceData, yyrl1330) } } else { - yyv1401 = make([]Ingress, yyrl1401) + yyv1330 = make([]ThirdPartyResourceData, yyrl1330) } - yyc1401 = true - yyrr1401 = len(yyv1401) - if yyrg1401 { - copy(yyv1401, yyv21401) + yyc1330 = true + yyrr1330 = len(yyv1330) + if yyrg1330 { + copy(yyv1330, yyv21330) } - } else if yyl1401 != len(yyv1401) { - yyv1401 = yyv1401[:yyl1401] - yyc1401 = true + } else if yyl1330 != len(yyv1330) { + yyv1330 = yyv1330[:yyl1330] + yyc1330 = true } - yyj1401 := 0 - for ; yyj1401 < yyrr1401; yyj1401++ { - yyh1401.ElemContainerState(yyj1401) + yyj1330 := 0 + for ; yyj1330 < yyrr1330; yyj1330++ { + yyh1330.ElemContainerState(yyj1330) if r.TryDecodeAsNil() { - yyv1401[yyj1401] = Ingress{} + yyv1330[yyj1330] = ThirdPartyResourceData{} } else { - yyv1402 := &yyv1401[yyj1401] - yyv1402.CodecDecodeSelf(d) + yyv1331 := &yyv1330[yyj1330] + yyv1331.CodecDecodeSelf(d) } } - if yyrt1401 { - for ; yyj1401 < yyl1401; yyj1401++ { - yyv1401 = append(yyv1401, Ingress{}) - yyh1401.ElemContainerState(yyj1401) + if yyrt1330 { + for ; yyj1330 < yyl1330; yyj1330++ { + yyv1330 = append(yyv1330, ThirdPartyResourceData{}) + yyh1330.ElemContainerState(yyj1330) if r.TryDecodeAsNil() { - yyv1401[yyj1401] = Ingress{} + yyv1330[yyj1330] = ThirdPartyResourceData{} } else { - yyv1403 := &yyv1401[yyj1401] - yyv1403.CodecDecodeSelf(d) + yyv1332 := &yyv1330[yyj1330] + yyv1332.CodecDecodeSelf(d) } } } } else { - yyj1401 := 0 - for ; !r.CheckBreak(); yyj1401++ { + yyj1330 := 0 + for ; !r.CheckBreak(); yyj1330++ { - if yyj1401 >= len(yyv1401) { - yyv1401 = append(yyv1401, Ingress{}) // var yyz1401 Ingress - yyc1401 = true + if yyj1330 >= len(yyv1330) { + yyv1330 = append(yyv1330, ThirdPartyResourceData{}) // var yyz1330 ThirdPartyResourceData + yyc1330 = true } - yyh1401.ElemContainerState(yyj1401) - if yyj1401 < len(yyv1401) { + yyh1330.ElemContainerState(yyj1330) + if yyj1330 < len(yyv1330) { if r.TryDecodeAsNil() { - yyv1401[yyj1401] = Ingress{} + yyv1330[yyj1330] = ThirdPartyResourceData{} } else { - yyv1404 := &yyv1401[yyj1401] - yyv1404.CodecDecodeSelf(d) + yyv1333 := &yyv1330[yyj1330] + yyv1333.CodecDecodeSelf(d) } } else { @@ -17192,115 +16364,115 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { } } - if yyj1401 < len(yyv1401) { - yyv1401 = yyv1401[:yyj1401] - yyc1401 = true - } else if yyj1401 == 0 && yyv1401 == nil { - yyv1401 = []Ingress{} - yyc1401 = true + if yyj1330 < len(yyv1330) { + yyv1330 = yyv1330[:yyj1330] + yyc1330 = true + } else if yyj1330 == 0 && yyv1330 == nil { + yyv1330 = []ThirdPartyResourceData{} + yyc1330 = true } } - yyh1401.End() - if yyc1401 { - *v = yyv1401 + yyh1330.End() + if yyc1330 { + *v = yyv1330 } } -func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1405 := range v { + for _, yyv1334 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1406 := &yyv1405 - yy1406.CodecEncodeSelf(e) + yy1335 := &yyv1334 + yy1335.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, 
_, _ = h, z, r - yyv1407 := *v - yyh1407, yyl1407 := z.DecSliceHelperStart() - var yyc1407 bool - if yyl1407 == 0 { - if yyv1407 == nil { - yyv1407 = []IngressTLS{} - yyc1407 = true - } else if len(yyv1407) != 0 { - yyv1407 = yyv1407[:0] - yyc1407 = true + yyv1336 := *v + yyh1336, yyl1336 := z.DecSliceHelperStart() + var yyc1336 bool + if yyl1336 == 0 { + if yyv1336 == nil { + yyv1336 = []Ingress{} + yyc1336 = true + } else if len(yyv1336) != 0 { + yyv1336 = yyv1336[:0] + yyc1336 = true } - } else if yyl1407 > 0 { - var yyrr1407, yyrl1407 int - var yyrt1407 bool - if yyl1407 > cap(yyv1407) { + } else if yyl1336 > 0 { + var yyrr1336, yyrl1336 int + var yyrt1336 bool + if yyl1336 > cap(yyv1336) { - yyrg1407 := len(yyv1407) > 0 - yyv21407 := yyv1407 - yyrl1407, yyrt1407 = z.DecInferLen(yyl1407, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1407 { - if yyrl1407 <= cap(yyv1407) { - yyv1407 = yyv1407[:yyrl1407] + yyrg1336 := len(yyv1336) > 0 + yyv21336 := yyv1336 + yyrl1336, yyrt1336 = z.DecInferLen(yyl1336, z.DecBasicHandle().MaxInitLen, 336) + if yyrt1336 { + if yyrl1336 <= cap(yyv1336) { + yyv1336 = yyv1336[:yyrl1336] } else { - yyv1407 = make([]IngressTLS, yyrl1407) + yyv1336 = make([]Ingress, yyrl1336) } } else { - yyv1407 = make([]IngressTLS, yyrl1407) + yyv1336 = make([]Ingress, yyrl1336) } - yyc1407 = true - yyrr1407 = len(yyv1407) - if yyrg1407 { - copy(yyv1407, yyv21407) + yyc1336 = true + yyrr1336 = len(yyv1336) + if yyrg1336 { + copy(yyv1336, yyv21336) } - } else if yyl1407 != len(yyv1407) { - yyv1407 = yyv1407[:yyl1407] - yyc1407 = true + } else if yyl1336 != len(yyv1336) { + yyv1336 = yyv1336[:yyl1336] + yyc1336 = true } - yyj1407 := 0 - for ; yyj1407 < yyrr1407; yyj1407++ { - yyh1407.ElemContainerState(yyj1407) + yyj1336 := 0 + for ; yyj1336 < yyrr1336; yyj1336++ { + yyh1336.ElemContainerState(yyj1336) if r.TryDecodeAsNil() { - yyv1407[yyj1407] = IngressTLS{} + yyv1336[yyj1336] = Ingress{} } else { - yyv1408 := &yyv1407[yyj1407] - yyv1408.CodecDecodeSelf(d) + yyv1337 := &yyv1336[yyj1336] + yyv1337.CodecDecodeSelf(d) } } - if yyrt1407 { - for ; yyj1407 < yyl1407; yyj1407++ { - yyv1407 = append(yyv1407, IngressTLS{}) - yyh1407.ElemContainerState(yyj1407) + if yyrt1336 { + for ; yyj1336 < yyl1336; yyj1336++ { + yyv1336 = append(yyv1336, Ingress{}) + yyh1336.ElemContainerState(yyj1336) if r.TryDecodeAsNil() { - yyv1407[yyj1407] = IngressTLS{} + yyv1336[yyj1336] = Ingress{} } else { - yyv1409 := &yyv1407[yyj1407] - yyv1409.CodecDecodeSelf(d) + yyv1338 := &yyv1336[yyj1336] + yyv1338.CodecDecodeSelf(d) } } } } else { - yyj1407 := 0 - for ; !r.CheckBreak(); yyj1407++ { + yyj1336 := 0 + for ; !r.CheckBreak(); yyj1336++ { - if yyj1407 >= len(yyv1407) { - yyv1407 = append(yyv1407, IngressTLS{}) // var yyz1407 IngressTLS - yyc1407 = true + if yyj1336 >= len(yyv1336) { + yyv1336 = append(yyv1336, Ingress{}) // var yyz1336 Ingress + yyc1336 = true } - yyh1407.ElemContainerState(yyj1407) - if yyj1407 < len(yyv1407) { + yyh1336.ElemContainerState(yyj1336) + if yyj1336 < len(yyv1336) { if r.TryDecodeAsNil() { - yyv1407[yyj1407] = IngressTLS{} + yyv1336[yyj1336] = Ingress{} } else { - yyv1410 := &yyv1407[yyj1407] - yyv1410.CodecDecodeSelf(d) + yyv1339 := &yyv1336[yyj1336] + yyv1339.CodecDecodeSelf(d) } } else { @@ -17308,115 +16480,115 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode } } - if yyj1407 < len(yyv1407) { - yyv1407 = yyv1407[:yyj1407] - yyc1407 = true - } else if yyj1407 == 0 && yyv1407 == nil { - yyv1407 = []IngressTLS{} - yyc1407 = true + if yyj1336 < 
len(yyv1336) { + yyv1336 = yyv1336[:yyj1336] + yyc1336 = true + } else if yyj1336 == 0 && yyv1336 == nil { + yyv1336 = []Ingress{} + yyc1336 = true } } - yyh1407.End() - if yyc1407 { - *v = yyv1407 + yyh1336.End() + if yyc1336 { + *v = yyv1336 } } -func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1411 := range v { + for _, yyv1340 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1412 := &yyv1411 - yy1412.CodecEncodeSelf(e) + yy1341 := &yyv1340 + yy1341.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1413 := *v - yyh1413, yyl1413 := z.DecSliceHelperStart() - var yyc1413 bool - if yyl1413 == 0 { - if yyv1413 == nil { - yyv1413 = []IngressRule{} - yyc1413 = true - } else if len(yyv1413) != 0 { - yyv1413 = yyv1413[:0] - yyc1413 = true - } - } else if yyl1413 > 0 { - var yyrr1413, yyrl1413 int - var yyrt1413 bool - if yyl1413 > cap(yyv1413) { - - yyrg1413 := len(yyv1413) > 0 - yyv21413 := yyv1413 - yyrl1413, yyrt1413 = z.DecInferLen(yyl1413, z.DecBasicHandle().MaxInitLen, 24) - if yyrt1413 { - if yyrl1413 <= cap(yyv1413) { - yyv1413 = yyv1413[:yyrl1413] + _, _, _ = h, z, r + + yyv1342 := *v + yyh1342, yyl1342 := z.DecSliceHelperStart() + var yyc1342 bool + if yyl1342 == 0 { + if yyv1342 == nil { + yyv1342 = []IngressTLS{} + yyc1342 = true + } else if len(yyv1342) != 0 { + yyv1342 = yyv1342[:0] + yyc1342 = true + } + } else if yyl1342 > 0 { + var yyrr1342, yyrl1342 int + var yyrt1342 bool + if yyl1342 > cap(yyv1342) { + + yyrg1342 := len(yyv1342) > 0 + yyv21342 := yyv1342 + yyrl1342, yyrt1342 = z.DecInferLen(yyl1342, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1342 { + if yyrl1342 <= cap(yyv1342) { + yyv1342 = yyv1342[:yyrl1342] } else { - yyv1413 = make([]IngressRule, yyrl1413) + yyv1342 = make([]IngressTLS, yyrl1342) } } else { - yyv1413 = make([]IngressRule, yyrl1413) + yyv1342 = make([]IngressTLS, yyrl1342) } - yyc1413 = true - yyrr1413 = len(yyv1413) - if yyrg1413 { - copy(yyv1413, yyv21413) + yyc1342 = true + yyrr1342 = len(yyv1342) + if yyrg1342 { + copy(yyv1342, yyv21342) } - } else if yyl1413 != len(yyv1413) { - yyv1413 = yyv1413[:yyl1413] - yyc1413 = true + } else if yyl1342 != len(yyv1342) { + yyv1342 = yyv1342[:yyl1342] + yyc1342 = true } - yyj1413 := 0 - for ; yyj1413 < yyrr1413; yyj1413++ { - yyh1413.ElemContainerState(yyj1413) + yyj1342 := 0 + for ; yyj1342 < yyrr1342; yyj1342++ { + yyh1342.ElemContainerState(yyj1342) if r.TryDecodeAsNil() { - yyv1413[yyj1413] = IngressRule{} + yyv1342[yyj1342] = IngressTLS{} } else { - yyv1414 := &yyv1413[yyj1413] - yyv1414.CodecDecodeSelf(d) + yyv1343 := &yyv1342[yyj1342] + yyv1343.CodecDecodeSelf(d) } } - if yyrt1413 { - for ; yyj1413 < yyl1413; yyj1413++ { - yyv1413 = append(yyv1413, IngressRule{}) - yyh1413.ElemContainerState(yyj1413) + if yyrt1342 { + for ; yyj1342 < yyl1342; yyj1342++ { + yyv1342 = append(yyv1342, IngressTLS{}) + yyh1342.ElemContainerState(yyj1342) if r.TryDecodeAsNil() { - yyv1413[yyj1413] = IngressRule{} + yyv1342[yyj1342] = IngressTLS{} } else { - yyv1415 := 
&yyv1413[yyj1413] - yyv1415.CodecDecodeSelf(d) + yyv1344 := &yyv1342[yyj1342] + yyv1344.CodecDecodeSelf(d) } } } } else { - yyj1413 := 0 - for ; !r.CheckBreak(); yyj1413++ { + yyj1342 := 0 + for ; !r.CheckBreak(); yyj1342++ { - if yyj1413 >= len(yyv1413) { - yyv1413 = append(yyv1413, IngressRule{}) // var yyz1413 IngressRule - yyc1413 = true + if yyj1342 >= len(yyv1342) { + yyv1342 = append(yyv1342, IngressTLS{}) // var yyz1342 IngressTLS + yyc1342 = true } - yyh1413.ElemContainerState(yyj1413) - if yyj1413 < len(yyv1413) { + yyh1342.ElemContainerState(yyj1342) + if yyj1342 < len(yyv1342) { if r.TryDecodeAsNil() { - yyv1413[yyj1413] = IngressRule{} + yyv1342[yyj1342] = IngressTLS{} } else { - yyv1416 := &yyv1413[yyj1413] - yyv1416.CodecDecodeSelf(d) + yyv1345 := &yyv1342[yyj1342] + yyv1345.CodecDecodeSelf(d) } } else { @@ -17424,115 +16596,115 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco } } - if yyj1413 < len(yyv1413) { - yyv1413 = yyv1413[:yyj1413] - yyc1413 = true - } else if yyj1413 == 0 && yyv1413 == nil { - yyv1413 = []IngressRule{} - yyc1413 = true + if yyj1342 < len(yyv1342) { + yyv1342 = yyv1342[:yyj1342] + yyc1342 = true + } else if yyj1342 == 0 && yyv1342 == nil { + yyv1342 = []IngressTLS{} + yyc1342 = true } } - yyh1413.End() - if yyc1413 { - *v = yyv1413 + yyh1342.End() + if yyc1342 { + *v = yyv1342 } } -func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1417 := range v { + for _, yyv1346 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1418 := &yyv1417 - yy1418.CodecEncodeSelf(e) + yy1347 := &yyv1346 + yy1347.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1419 := *v - yyh1419, yyl1419 := z.DecSliceHelperStart() - var yyc1419 bool - if yyl1419 == 0 { - if yyv1419 == nil { - yyv1419 = []HTTPIngressPath{} - yyc1419 = true - } else if len(yyv1419) != 0 { - yyv1419 = yyv1419[:0] - yyc1419 = true + yyv1348 := *v + yyh1348, yyl1348 := z.DecSliceHelperStart() + var yyc1348 bool + if yyl1348 == 0 { + if yyv1348 == nil { + yyv1348 = []IngressRule{} + yyc1348 = true + } else if len(yyv1348) != 0 { + yyv1348 = yyv1348[:0] + yyc1348 = true } - } else if yyl1419 > 0 { - var yyrr1419, yyrl1419 int - var yyrt1419 bool - if yyl1419 > cap(yyv1419) { + } else if yyl1348 > 0 { + var yyrr1348, yyrl1348 int + var yyrt1348 bool + if yyl1348 > cap(yyv1348) { - yyrg1419 := len(yyv1419) > 0 - yyv21419 := yyv1419 - yyrl1419, yyrt1419 = z.DecInferLen(yyl1419, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1419 { - if yyrl1419 <= cap(yyv1419) { - yyv1419 = yyv1419[:yyrl1419] + yyrg1348 := len(yyv1348) > 0 + yyv21348 := yyv1348 + yyrl1348, yyrt1348 = z.DecInferLen(yyl1348, z.DecBasicHandle().MaxInitLen, 24) + if yyrt1348 { + if yyrl1348 <= cap(yyv1348) { + yyv1348 = yyv1348[:yyrl1348] } else { - yyv1419 = make([]HTTPIngressPath, yyrl1419) + yyv1348 = make([]IngressRule, yyrl1348) } } else { - yyv1419 = make([]HTTPIngressPath, yyrl1419) + yyv1348 = make([]IngressRule, yyrl1348) } - yyc1419 = true - 
yyrr1419 = len(yyv1419) - if yyrg1419 { - copy(yyv1419, yyv21419) + yyc1348 = true + yyrr1348 = len(yyv1348) + if yyrg1348 { + copy(yyv1348, yyv21348) } - } else if yyl1419 != len(yyv1419) { - yyv1419 = yyv1419[:yyl1419] - yyc1419 = true + } else if yyl1348 != len(yyv1348) { + yyv1348 = yyv1348[:yyl1348] + yyc1348 = true } - yyj1419 := 0 - for ; yyj1419 < yyrr1419; yyj1419++ { - yyh1419.ElemContainerState(yyj1419) + yyj1348 := 0 + for ; yyj1348 < yyrr1348; yyj1348++ { + yyh1348.ElemContainerState(yyj1348) if r.TryDecodeAsNil() { - yyv1419[yyj1419] = HTTPIngressPath{} + yyv1348[yyj1348] = IngressRule{} } else { - yyv1420 := &yyv1419[yyj1419] - yyv1420.CodecDecodeSelf(d) + yyv1349 := &yyv1348[yyj1348] + yyv1349.CodecDecodeSelf(d) } } - if yyrt1419 { - for ; yyj1419 < yyl1419; yyj1419++ { - yyv1419 = append(yyv1419, HTTPIngressPath{}) - yyh1419.ElemContainerState(yyj1419) + if yyrt1348 { + for ; yyj1348 < yyl1348; yyj1348++ { + yyv1348 = append(yyv1348, IngressRule{}) + yyh1348.ElemContainerState(yyj1348) if r.TryDecodeAsNil() { - yyv1419[yyj1419] = HTTPIngressPath{} + yyv1348[yyj1348] = IngressRule{} } else { - yyv1421 := &yyv1419[yyj1419] - yyv1421.CodecDecodeSelf(d) + yyv1350 := &yyv1348[yyj1348] + yyv1350.CodecDecodeSelf(d) } } } } else { - yyj1419 := 0 - for ; !r.CheckBreak(); yyj1419++ { + yyj1348 := 0 + for ; !r.CheckBreak(); yyj1348++ { - if yyj1419 >= len(yyv1419) { - yyv1419 = append(yyv1419, HTTPIngressPath{}) // var yyz1419 HTTPIngressPath - yyc1419 = true + if yyj1348 >= len(yyv1348) { + yyv1348 = append(yyv1348, IngressRule{}) // var yyz1348 IngressRule + yyc1348 = true } - yyh1419.ElemContainerState(yyj1419) - if yyj1419 < len(yyv1419) { + yyh1348.ElemContainerState(yyj1348) + if yyj1348 < len(yyv1348) { if r.TryDecodeAsNil() { - yyv1419[yyj1419] = HTTPIngressPath{} + yyv1348[yyj1348] = IngressRule{} } else { - yyv1422 := &yyv1419[yyj1419] - yyv1422.CodecDecodeSelf(d) + yyv1351 := &yyv1348[yyj1348] + yyv1351.CodecDecodeSelf(d) } } else { @@ -17540,115 +16712,115 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 } } - if yyj1419 < len(yyv1419) { - yyv1419 = yyv1419[:yyj1419] - yyc1419 = true - } else if yyj1419 == 0 && yyv1419 == nil { - yyv1419 = []HTTPIngressPath{} - yyc1419 = true + if yyj1348 < len(yyv1348) { + yyv1348 = yyv1348[:yyj1348] + yyc1348 = true + } else if yyj1348 == 0 && yyv1348 == nil { + yyv1348 = []IngressRule{} + yyc1348 = true } } - yyh1419.End() - if yyc1419 { - *v = yyv1419 + yyh1348.End() + if yyc1348 { + *v = yyv1348 } } -func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1423 := range v { + for _, yyv1352 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1424 := &yyv1423 - yy1424.CodecEncodeSelf(e) + yy1353 := &yyv1352 + yy1353.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1425 := *v - yyh1425, yyl1425 := z.DecSliceHelperStart() - var yyc1425 bool - if yyl1425 == 0 { - if yyv1425 == nil { - yyv1425 = []ReplicaSet{} - yyc1425 = true - } else if len(yyv1425) 
!= 0 { - yyv1425 = yyv1425[:0] - yyc1425 = true + yyv1354 := *v + yyh1354, yyl1354 := z.DecSliceHelperStart() + var yyc1354 bool + if yyl1354 == 0 { + if yyv1354 == nil { + yyv1354 = []HTTPIngressPath{} + yyc1354 = true + } else if len(yyv1354) != 0 { + yyv1354 = yyv1354[:0] + yyc1354 = true } - } else if yyl1425 > 0 { - var yyrr1425, yyrl1425 int - var yyrt1425 bool - if yyl1425 > cap(yyv1425) { + } else if yyl1354 > 0 { + var yyrr1354, yyrl1354 int + var yyrt1354 bool + if yyl1354 > cap(yyv1354) { - yyrg1425 := len(yyv1425) > 0 - yyv21425 := yyv1425 - yyrl1425, yyrt1425 = z.DecInferLen(yyl1425, z.DecBasicHandle().MaxInitLen, 744) - if yyrt1425 { - if yyrl1425 <= cap(yyv1425) { - yyv1425 = yyv1425[:yyrl1425] + yyrg1354 := len(yyv1354) > 0 + yyv21354 := yyv1354 + yyrl1354, yyrt1354 = z.DecInferLen(yyl1354, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1354 { + if yyrl1354 <= cap(yyv1354) { + yyv1354 = yyv1354[:yyrl1354] } else { - yyv1425 = make([]ReplicaSet, yyrl1425) + yyv1354 = make([]HTTPIngressPath, yyrl1354) } } else { - yyv1425 = make([]ReplicaSet, yyrl1425) + yyv1354 = make([]HTTPIngressPath, yyrl1354) } - yyc1425 = true - yyrr1425 = len(yyv1425) - if yyrg1425 { - copy(yyv1425, yyv21425) + yyc1354 = true + yyrr1354 = len(yyv1354) + if yyrg1354 { + copy(yyv1354, yyv21354) } - } else if yyl1425 != len(yyv1425) { - yyv1425 = yyv1425[:yyl1425] - yyc1425 = true + } else if yyl1354 != len(yyv1354) { + yyv1354 = yyv1354[:yyl1354] + yyc1354 = true } - yyj1425 := 0 - for ; yyj1425 < yyrr1425; yyj1425++ { - yyh1425.ElemContainerState(yyj1425) + yyj1354 := 0 + for ; yyj1354 < yyrr1354; yyj1354++ { + yyh1354.ElemContainerState(yyj1354) if r.TryDecodeAsNil() { - yyv1425[yyj1425] = ReplicaSet{} + yyv1354[yyj1354] = HTTPIngressPath{} } else { - yyv1426 := &yyv1425[yyj1425] - yyv1426.CodecDecodeSelf(d) + yyv1355 := &yyv1354[yyj1354] + yyv1355.CodecDecodeSelf(d) } } - if yyrt1425 { - for ; yyj1425 < yyl1425; yyj1425++ { - yyv1425 = append(yyv1425, ReplicaSet{}) - yyh1425.ElemContainerState(yyj1425) + if yyrt1354 { + for ; yyj1354 < yyl1354; yyj1354++ { + yyv1354 = append(yyv1354, HTTPIngressPath{}) + yyh1354.ElemContainerState(yyj1354) if r.TryDecodeAsNil() { - yyv1425[yyj1425] = ReplicaSet{} + yyv1354[yyj1354] = HTTPIngressPath{} } else { - yyv1427 := &yyv1425[yyj1425] - yyv1427.CodecDecodeSelf(d) + yyv1356 := &yyv1354[yyj1354] + yyv1356.CodecDecodeSelf(d) } } } } else { - yyj1425 := 0 - for ; !r.CheckBreak(); yyj1425++ { + yyj1354 := 0 + for ; !r.CheckBreak(); yyj1354++ { - if yyj1425 >= len(yyv1425) { - yyv1425 = append(yyv1425, ReplicaSet{}) // var yyz1425 ReplicaSet - yyc1425 = true + if yyj1354 >= len(yyv1354) { + yyv1354 = append(yyv1354, HTTPIngressPath{}) // var yyz1354 HTTPIngressPath + yyc1354 = true } - yyh1425.ElemContainerState(yyj1425) - if yyj1425 < len(yyv1425) { + yyh1354.ElemContainerState(yyj1354) + if yyj1354 < len(yyv1354) { if r.TryDecodeAsNil() { - yyv1425[yyj1425] = ReplicaSet{} + yyv1354[yyj1354] = HTTPIngressPath{} } else { - yyv1428 := &yyv1425[yyj1425] - yyv1428.CodecDecodeSelf(d) + yyv1357 := &yyv1354[yyj1354] + yyv1357.CodecDecodeSelf(d) } } else { @@ -17656,112 +16828,115 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode } } - if yyj1425 < len(yyv1425) { - yyv1425 = yyv1425[:yyj1425] - yyc1425 = true - } else if yyj1425 == 0 && yyv1425 == nil { - yyv1425 = []ReplicaSet{} - yyc1425 = true + if yyj1354 < len(yyv1354) { + yyv1354 = yyv1354[:yyj1354] + yyc1354 = true + } else if yyj1354 == 0 && yyv1354 == nil { + yyv1354 = 
[]HTTPIngressPath{} + yyc1354 = true } } - yyh1425.End() - if yyc1425 { - *v = yyv1425 + yyh1354.End() + if yyc1354 { + *v = yyv1354 } } -func (x codecSelfer1234) encSliceapi_Capability(v []pkg2_api.Capability, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1429 := range v { + for _, yyv1358 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1430 := z.EncBinary() - _ = yym1430 - if false { - } else if z.HasExtensions() && z.EncExt(yyv1429) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv1429)) - } + yy1359 := &yyv1358 + yy1359.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1431 := *v - yyh1431, yyl1431 := z.DecSliceHelperStart() - var yyc1431 bool - if yyl1431 == 0 { - if yyv1431 == nil { - yyv1431 = []pkg2_api.Capability{} - yyc1431 = true - } else if len(yyv1431) != 0 { - yyv1431 = yyv1431[:0] - yyc1431 = true + yyv1360 := *v + yyh1360, yyl1360 := z.DecSliceHelperStart() + var yyc1360 bool + if yyl1360 == 0 { + if yyv1360 == nil { + yyv1360 = []ReplicaSet{} + yyc1360 = true + } else if len(yyv1360) != 0 { + yyv1360 = yyv1360[:0] + yyc1360 = true } - } else if yyl1431 > 0 { - var yyrr1431, yyrl1431 int - var yyrt1431 bool - if yyl1431 > cap(yyv1431) { + } else if yyl1360 > 0 { + var yyrr1360, yyrl1360 int + var yyrt1360 bool + if yyl1360 > cap(yyv1360) { - yyrl1431, yyrt1431 = z.DecInferLen(yyl1431, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1431 { - if yyrl1431 <= cap(yyv1431) { - yyv1431 = yyv1431[:yyrl1431] + yyrg1360 := len(yyv1360) > 0 + yyv21360 := yyv1360 + yyrl1360, yyrt1360 = z.DecInferLen(yyl1360, z.DecBasicHandle().MaxInitLen, 744) + if yyrt1360 { + if yyrl1360 <= cap(yyv1360) { + yyv1360 = yyv1360[:yyrl1360] } else { - yyv1431 = make([]pkg2_api.Capability, yyrl1431) + yyv1360 = make([]ReplicaSet, yyrl1360) } } else { - yyv1431 = make([]pkg2_api.Capability, yyrl1431) + yyv1360 = make([]ReplicaSet, yyrl1360) } - yyc1431 = true - yyrr1431 = len(yyv1431) - } else if yyl1431 != len(yyv1431) { - yyv1431 = yyv1431[:yyl1431] - yyc1431 = true + yyc1360 = true + yyrr1360 = len(yyv1360) + if yyrg1360 { + copy(yyv1360, yyv21360) + } + } else if yyl1360 != len(yyv1360) { + yyv1360 = yyv1360[:yyl1360] + yyc1360 = true } - yyj1431 := 0 - for ; yyj1431 < yyrr1431; yyj1431++ { - yyh1431.ElemContainerState(yyj1431) + yyj1360 := 0 + for ; yyj1360 < yyrr1360; yyj1360++ { + yyh1360.ElemContainerState(yyj1360) if r.TryDecodeAsNil() { - yyv1431[yyj1431] = "" + yyv1360[yyj1360] = ReplicaSet{} } else { - yyv1431[yyj1431] = pkg2_api.Capability(r.DecodeString()) + yyv1361 := &yyv1360[yyj1360] + yyv1361.CodecDecodeSelf(d) } } - if yyrt1431 { - for ; yyj1431 < yyl1431; yyj1431++ { - yyv1431 = append(yyv1431, "") - yyh1431.ElemContainerState(yyj1431) + if yyrt1360 { + for ; yyj1360 < yyl1360; yyj1360++ { + yyv1360 = append(yyv1360, ReplicaSet{}) + yyh1360.ElemContainerState(yyj1360) if r.TryDecodeAsNil() { - yyv1431[yyj1431] = "" + yyv1360[yyj1360] = ReplicaSet{} } else { - yyv1431[yyj1431] = pkg2_api.Capability(r.DecodeString()) + yyv1362 := &yyv1360[yyj1360] + yyv1362.CodecDecodeSelf(d) } 
} } } else { - yyj1431 := 0 - for ; !r.CheckBreak(); yyj1431++ { + yyj1360 := 0 + for ; !r.CheckBreak(); yyj1360++ { - if yyj1431 >= len(yyv1431) { - yyv1431 = append(yyv1431, "") // var yyz1431 pkg2_api.Capability - yyc1431 = true + if yyj1360 >= len(yyv1360) { + yyv1360 = append(yyv1360, ReplicaSet{}) // var yyz1360 ReplicaSet + yyc1360 = true } - yyh1431.ElemContainerState(yyj1431) - if yyj1431 < len(yyv1431) { + yyh1360.ElemContainerState(yyj1360) + if yyj1360 < len(yyv1360) { if r.TryDecodeAsNil() { - yyv1431[yyj1431] = "" + yyv1360[yyj1360] = ReplicaSet{} } else { - yyv1431[yyj1431] = pkg2_api.Capability(r.DecodeString()) + yyv1363 := &yyv1360[yyj1360] + yyv1363.CodecDecodeSelf(d) } } else { @@ -17769,106 +16944,112 @@ func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *cod } } - if yyj1431 < len(yyv1431) { - yyv1431 = yyv1431[:yyj1431] - yyc1431 = true - } else if yyj1431 == 0 && yyv1431 == nil { - yyv1431 = []pkg2_api.Capability{} - yyc1431 = true + if yyj1360 < len(yyv1360) { + yyv1360 = yyv1360[:yyj1360] + yyc1360 = true + } else if yyj1360 == 0 && yyv1360 == nil { + yyv1360 = []ReplicaSet{} + yyc1360 = true } } - yyh1431.End() - if yyc1431 { - *v = yyv1431 + yyh1360.End() + if yyc1360 { + *v = yyv1360 } } -func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceapi_Capability(v []pkg2_api.Capability, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1435 := range v { + for _, yyv1364 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1435.CodecEncodeSelf(e) + yym1365 := z.EncBinary() + _ = yym1365 + if false { + } else if z.HasExtensions() && z.EncExt(yyv1364) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1364)) + } } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceapi_Capability(v *[]pkg2_api.Capability, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1436 := *v - yyh1436, yyl1436 := z.DecSliceHelperStart() - var yyc1436 bool - if yyl1436 == 0 { - if yyv1436 == nil { - yyv1436 = []FSType{} - yyc1436 = true - } else if len(yyv1436) != 0 { - yyv1436 = yyv1436[:0] - yyc1436 = true + yyv1366 := *v + yyh1366, yyl1366 := z.DecSliceHelperStart() + var yyc1366 bool + if yyl1366 == 0 { + if yyv1366 == nil { + yyv1366 = []pkg2_api.Capability{} + yyc1366 = true + } else if len(yyv1366) != 0 { + yyv1366 = yyv1366[:0] + yyc1366 = true } - } else if yyl1436 > 0 { - var yyrr1436, yyrl1436 int - var yyrt1436 bool - if yyl1436 > cap(yyv1436) { + } else if yyl1366 > 0 { + var yyrr1366, yyrl1366 int + var yyrt1366 bool + if yyl1366 > cap(yyv1366) { - yyrl1436, yyrt1436 = z.DecInferLen(yyl1436, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1436 { - if yyrl1436 <= cap(yyv1436) { - yyv1436 = yyv1436[:yyrl1436] + yyrl1366, yyrt1366 = z.DecInferLen(yyl1366, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1366 { + if yyrl1366 <= cap(yyv1366) { + yyv1366 = yyv1366[:yyrl1366] } else { - yyv1436 = make([]FSType, yyrl1436) + yyv1366 = make([]pkg2_api.Capability, yyrl1366) } } else { - yyv1436 = make([]FSType, yyrl1436) + yyv1366 = make([]pkg2_api.Capability, yyrl1366) } - yyc1436 = true - yyrr1436 = len(yyv1436) - } else if yyl1436 != len(yyv1436) { - yyv1436 = yyv1436[:yyl1436] - yyc1436 = true + yyc1366 = true + 
yyrr1366 = len(yyv1366) + } else if yyl1366 != len(yyv1366) { + yyv1366 = yyv1366[:yyl1366] + yyc1366 = true } - yyj1436 := 0 - for ; yyj1436 < yyrr1436; yyj1436++ { - yyh1436.ElemContainerState(yyj1436) + yyj1366 := 0 + for ; yyj1366 < yyrr1366; yyj1366++ { + yyh1366.ElemContainerState(yyj1366) if r.TryDecodeAsNil() { - yyv1436[yyj1436] = "" + yyv1366[yyj1366] = "" } else { - yyv1436[yyj1436] = FSType(r.DecodeString()) + yyv1366[yyj1366] = pkg2_api.Capability(r.DecodeString()) } } - if yyrt1436 { - for ; yyj1436 < yyl1436; yyj1436++ { - yyv1436 = append(yyv1436, "") - yyh1436.ElemContainerState(yyj1436) + if yyrt1366 { + for ; yyj1366 < yyl1366; yyj1366++ { + yyv1366 = append(yyv1366, "") + yyh1366.ElemContainerState(yyj1366) if r.TryDecodeAsNil() { - yyv1436[yyj1436] = "" + yyv1366[yyj1366] = "" } else { - yyv1436[yyj1436] = FSType(r.DecodeString()) + yyv1366[yyj1366] = pkg2_api.Capability(r.DecodeString()) } } } } else { - yyj1436 := 0 - for ; !r.CheckBreak(); yyj1436++ { + yyj1366 := 0 + for ; !r.CheckBreak(); yyj1366++ { - if yyj1436 >= len(yyv1436) { - yyv1436 = append(yyv1436, "") // var yyz1436 FSType - yyc1436 = true + if yyj1366 >= len(yyv1366) { + yyv1366 = append(yyv1366, "") // var yyz1366 pkg2_api.Capability + yyc1366 = true } - yyh1436.ElemContainerState(yyj1436) - if yyj1436 < len(yyv1436) { + yyh1366.ElemContainerState(yyj1366) + if yyj1366 < len(yyv1366) { if r.TryDecodeAsNil() { - yyv1436[yyj1436] = "" + yyv1366[yyj1366] = "" } else { - yyv1436[yyj1436] = FSType(r.DecodeString()) + yyv1366[yyj1366] = pkg2_api.Capability(r.DecodeString()) } } else { @@ -17876,115 +17057,106 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { } } - if yyj1436 < len(yyv1436) { - yyv1436 = yyv1436[:yyj1436] - yyc1436 = true - } else if yyj1436 == 0 && yyv1436 == nil { - yyv1436 = []FSType{} - yyc1436 = true + if yyj1366 < len(yyv1366) { + yyv1366 = yyv1366[:yyj1366] + yyc1366 = true + } else if yyj1366 == 0 && yyv1366 == nil { + yyv1366 = []pkg2_api.Capability{} + yyc1366 = true } } - yyh1436.End() - if yyc1436 { - *v = yyv1436 + yyh1366.End() + if yyc1366 { + *v = yyv1366 } } -func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1440 := range v { + for _, yyv1370 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1441 := &yyv1440 - yy1441.CodecEncodeSelf(e) + yyv1370.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1442 := *v - yyh1442, yyl1442 := z.DecSliceHelperStart() - var yyc1442 bool - if yyl1442 == 0 { - if yyv1442 == nil { - yyv1442 = []HostPortRange{} - yyc1442 = true - } else if len(yyv1442) != 0 { - yyv1442 = yyv1442[:0] - yyc1442 = true + yyv1371 := *v + yyh1371, yyl1371 := z.DecSliceHelperStart() + var yyc1371 bool + if yyl1371 == 0 { + if yyv1371 == nil { + yyv1371 = []FSType{} + yyc1371 = true + } else if len(yyv1371) != 0 { + yyv1371 = yyv1371[:0] + yyc1371 = true } - } else if yyl1442 > 0 { - var yyrr1442, yyrl1442 int - var yyrt1442 bool - if yyl1442 > cap(yyv1442) { + } else if yyl1371 > 0 { + var yyrr1371, 
yyrl1371 int + var yyrt1371 bool + if yyl1371 > cap(yyv1371) { - yyrg1442 := len(yyv1442) > 0 - yyv21442 := yyv1442 - yyrl1442, yyrt1442 = z.DecInferLen(yyl1442, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1442 { - if yyrl1442 <= cap(yyv1442) { - yyv1442 = yyv1442[:yyrl1442] + yyrl1371, yyrt1371 = z.DecInferLen(yyl1371, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1371 { + if yyrl1371 <= cap(yyv1371) { + yyv1371 = yyv1371[:yyrl1371] } else { - yyv1442 = make([]HostPortRange, yyrl1442) + yyv1371 = make([]FSType, yyrl1371) } } else { - yyv1442 = make([]HostPortRange, yyrl1442) + yyv1371 = make([]FSType, yyrl1371) } - yyc1442 = true - yyrr1442 = len(yyv1442) - if yyrg1442 { - copy(yyv1442, yyv21442) - } - } else if yyl1442 != len(yyv1442) { - yyv1442 = yyv1442[:yyl1442] - yyc1442 = true + yyc1371 = true + yyrr1371 = len(yyv1371) + } else if yyl1371 != len(yyv1371) { + yyv1371 = yyv1371[:yyl1371] + yyc1371 = true } - yyj1442 := 0 - for ; yyj1442 < yyrr1442; yyj1442++ { - yyh1442.ElemContainerState(yyj1442) + yyj1371 := 0 + for ; yyj1371 < yyrr1371; yyj1371++ { + yyh1371.ElemContainerState(yyj1371) if r.TryDecodeAsNil() { - yyv1442[yyj1442] = HostPortRange{} + yyv1371[yyj1371] = "" } else { - yyv1443 := &yyv1442[yyj1442] - yyv1443.CodecDecodeSelf(d) + yyv1371[yyj1371] = FSType(r.DecodeString()) } } - if yyrt1442 { - for ; yyj1442 < yyl1442; yyj1442++ { - yyv1442 = append(yyv1442, HostPortRange{}) - yyh1442.ElemContainerState(yyj1442) + if yyrt1371 { + for ; yyj1371 < yyl1371; yyj1371++ { + yyv1371 = append(yyv1371, "") + yyh1371.ElemContainerState(yyj1371) if r.TryDecodeAsNil() { - yyv1442[yyj1442] = HostPortRange{} + yyv1371[yyj1371] = "" } else { - yyv1444 := &yyv1442[yyj1442] - yyv1444.CodecDecodeSelf(d) + yyv1371[yyj1371] = FSType(r.DecodeString()) } } } } else { - yyj1442 := 0 - for ; !r.CheckBreak(); yyj1442++ { + yyj1371 := 0 + for ; !r.CheckBreak(); yyj1371++ { - if yyj1442 >= len(yyv1442) { - yyv1442 = append(yyv1442, HostPortRange{}) // var yyz1442 HostPortRange - yyc1442 = true + if yyj1371 >= len(yyv1371) { + yyv1371 = append(yyv1371, "") // var yyz1371 FSType + yyc1371 = true } - yyh1442.ElemContainerState(yyj1442) - if yyj1442 < len(yyv1442) { + yyh1371.ElemContainerState(yyj1371) + if yyj1371 < len(yyv1371) { if r.TryDecodeAsNil() { - yyv1442[yyj1442] = HostPortRange{} + yyv1371[yyj1371] = "" } else { - yyv1445 := &yyv1442[yyj1442] - yyv1445.CodecDecodeSelf(d) + yyv1371[yyj1371] = FSType(r.DecodeString()) } } else { @@ -17992,115 +17164,115 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. 
} } - if yyj1442 < len(yyv1442) { - yyv1442 = yyv1442[:yyj1442] - yyc1442 = true - } else if yyj1442 == 0 && yyv1442 == nil { - yyv1442 = []HostPortRange{} - yyc1442 = true + if yyj1371 < len(yyv1371) { + yyv1371 = yyv1371[:yyj1371] + yyc1371 = true + } else if yyj1371 == 0 && yyv1371 == nil { + yyv1371 = []FSType{} + yyc1371 = true } } - yyh1442.End() - if yyc1442 { - *v = yyv1442 + yyh1371.End() + if yyc1371 { + *v = yyv1371 } } -func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1446 := range v { + for _, yyv1375 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1447 := &yyv1446 - yy1447.CodecEncodeSelf(e) + yy1376 := &yyv1375 + yy1376.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1448 := *v - yyh1448, yyl1448 := z.DecSliceHelperStart() - var yyc1448 bool - if yyl1448 == 0 { - if yyv1448 == nil { - yyv1448 = []IDRange{} - yyc1448 = true - } else if len(yyv1448) != 0 { - yyv1448 = yyv1448[:0] - yyc1448 = true + yyv1377 := *v + yyh1377, yyl1377 := z.DecSliceHelperStart() + var yyc1377 bool + if yyl1377 == 0 { + if yyv1377 == nil { + yyv1377 = []HostPortRange{} + yyc1377 = true + } else if len(yyv1377) != 0 { + yyv1377 = yyv1377[:0] + yyc1377 = true } - } else if yyl1448 > 0 { - var yyrr1448, yyrl1448 int - var yyrt1448 bool - if yyl1448 > cap(yyv1448) { + } else if yyl1377 > 0 { + var yyrr1377, yyrl1377 int + var yyrt1377 bool + if yyl1377 > cap(yyv1377) { - yyrg1448 := len(yyv1448) > 0 - yyv21448 := yyv1448 - yyrl1448, yyrt1448 = z.DecInferLen(yyl1448, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1448 { - if yyrl1448 <= cap(yyv1448) { - yyv1448 = yyv1448[:yyrl1448] + yyrg1377 := len(yyv1377) > 0 + yyv21377 := yyv1377 + yyrl1377, yyrt1377 = z.DecInferLen(yyl1377, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1377 { + if yyrl1377 <= cap(yyv1377) { + yyv1377 = yyv1377[:yyrl1377] } else { - yyv1448 = make([]IDRange, yyrl1448) + yyv1377 = make([]HostPortRange, yyrl1377) } } else { - yyv1448 = make([]IDRange, yyrl1448) + yyv1377 = make([]HostPortRange, yyrl1377) } - yyc1448 = true - yyrr1448 = len(yyv1448) - if yyrg1448 { - copy(yyv1448, yyv21448) + yyc1377 = true + yyrr1377 = len(yyv1377) + if yyrg1377 { + copy(yyv1377, yyv21377) } - } else if yyl1448 != len(yyv1448) { - yyv1448 = yyv1448[:yyl1448] - yyc1448 = true + } else if yyl1377 != len(yyv1377) { + yyv1377 = yyv1377[:yyl1377] + yyc1377 = true } - yyj1448 := 0 - for ; yyj1448 < yyrr1448; yyj1448++ { - yyh1448.ElemContainerState(yyj1448) + yyj1377 := 0 + for ; yyj1377 < yyrr1377; yyj1377++ { + yyh1377.ElemContainerState(yyj1377) if r.TryDecodeAsNil() { - yyv1448[yyj1448] = IDRange{} + yyv1377[yyj1377] = HostPortRange{} } else { - yyv1449 := &yyv1448[yyj1448] - yyv1449.CodecDecodeSelf(d) + yyv1378 := &yyv1377[yyj1377] + yyv1378.CodecDecodeSelf(d) } } - if yyrt1448 { - for ; yyj1448 < yyl1448; yyj1448++ { - yyv1448 = append(yyv1448, IDRange{}) - yyh1448.ElemContainerState(yyj1448) + if yyrt1377 { + for ; yyj1377 < yyl1377; yyj1377++ { + yyv1377 = append(yyv1377, HostPortRange{}) + 
yyh1377.ElemContainerState(yyj1377) if r.TryDecodeAsNil() { - yyv1448[yyj1448] = IDRange{} + yyv1377[yyj1377] = HostPortRange{} } else { - yyv1450 := &yyv1448[yyj1448] - yyv1450.CodecDecodeSelf(d) + yyv1379 := &yyv1377[yyj1377] + yyv1379.CodecDecodeSelf(d) } } } } else { - yyj1448 := 0 - for ; !r.CheckBreak(); yyj1448++ { + yyj1377 := 0 + for ; !r.CheckBreak(); yyj1377++ { - if yyj1448 >= len(yyv1448) { - yyv1448 = append(yyv1448, IDRange{}) // var yyz1448 IDRange - yyc1448 = true + if yyj1377 >= len(yyv1377) { + yyv1377 = append(yyv1377, HostPortRange{}) // var yyz1377 HostPortRange + yyc1377 = true } - yyh1448.ElemContainerState(yyj1448) - if yyj1448 < len(yyv1448) { + yyh1377.ElemContainerState(yyj1377) + if yyj1377 < len(yyv1377) { if r.TryDecodeAsNil() { - yyv1448[yyj1448] = IDRange{} + yyv1377[yyj1377] = HostPortRange{} } else { - yyv1451 := &yyv1448[yyj1448] - yyv1451.CodecDecodeSelf(d) + yyv1380 := &yyv1377[yyj1377] + yyv1380.CodecDecodeSelf(d) } } else { @@ -18108,115 +17280,115 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { } } - if yyj1448 < len(yyv1448) { - yyv1448 = yyv1448[:yyj1448] - yyc1448 = true - } else if yyj1448 == 0 && yyv1448 == nil { - yyv1448 = []IDRange{} - yyc1448 = true + if yyj1377 < len(yyv1377) { + yyv1377 = yyv1377[:yyj1377] + yyc1377 = true + } else if yyj1377 == 0 && yyv1377 == nil { + yyv1377 = []HostPortRange{} + yyc1377 = true } } - yyh1448.End() - if yyc1448 { - *v = yyv1448 + yyh1377.End() + if yyc1377 { + *v = yyv1377 } } -func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1452 := range v { + for _, yyv1381 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1453 := &yyv1452 - yy1453.CodecEncodeSelf(e) + yy1382 := &yyv1381 + yy1382.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1454 := *v - yyh1454, yyl1454 := z.DecSliceHelperStart() - var yyc1454 bool - if yyl1454 == 0 { - if yyv1454 == nil { - yyv1454 = []PodSecurityPolicy{} - yyc1454 = true - } else if len(yyv1454) != 0 { - yyv1454 = yyv1454[:0] - yyc1454 = true + yyv1383 := *v + yyh1383, yyl1383 := z.DecSliceHelperStart() + var yyc1383 bool + if yyl1383 == 0 { + if yyv1383 == nil { + yyv1383 = []IDRange{} + yyc1383 = true + } else if len(yyv1383) != 0 { + yyv1383 = yyv1383[:0] + yyc1383 = true } - } else if yyl1454 > 0 { - var yyrr1454, yyrl1454 int - var yyrt1454 bool - if yyl1454 > cap(yyv1454) { + } else if yyl1383 > 0 { + var yyrr1383, yyrl1383 int + var yyrt1383 bool + if yyl1383 > cap(yyv1383) { - yyrg1454 := len(yyv1454) > 0 - yyv21454 := yyv1454 - yyrl1454, yyrt1454 = z.DecInferLen(yyl1454, z.DecBasicHandle().MaxInitLen, 552) - if yyrt1454 { - if yyrl1454 <= cap(yyv1454) { - yyv1454 = yyv1454[:yyrl1454] + yyrg1383 := len(yyv1383) > 0 + yyv21383 := yyv1383 + yyrl1383, yyrt1383 = z.DecInferLen(yyl1383, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1383 { + if yyrl1383 <= cap(yyv1383) { + yyv1383 = yyv1383[:yyrl1383] } else { - yyv1454 = make([]PodSecurityPolicy, yyrl1454) + yyv1383 = make([]IDRange, 
yyrl1383) } } else { - yyv1454 = make([]PodSecurityPolicy, yyrl1454) + yyv1383 = make([]IDRange, yyrl1383) } - yyc1454 = true - yyrr1454 = len(yyv1454) - if yyrg1454 { - copy(yyv1454, yyv21454) + yyc1383 = true + yyrr1383 = len(yyv1383) + if yyrg1383 { + copy(yyv1383, yyv21383) } - } else if yyl1454 != len(yyv1454) { - yyv1454 = yyv1454[:yyl1454] - yyc1454 = true + } else if yyl1383 != len(yyv1383) { + yyv1383 = yyv1383[:yyl1383] + yyc1383 = true } - yyj1454 := 0 - for ; yyj1454 < yyrr1454; yyj1454++ { - yyh1454.ElemContainerState(yyj1454) + yyj1383 := 0 + for ; yyj1383 < yyrr1383; yyj1383++ { + yyh1383.ElemContainerState(yyj1383) if r.TryDecodeAsNil() { - yyv1454[yyj1454] = PodSecurityPolicy{} + yyv1383[yyj1383] = IDRange{} } else { - yyv1455 := &yyv1454[yyj1454] - yyv1455.CodecDecodeSelf(d) + yyv1384 := &yyv1383[yyj1383] + yyv1384.CodecDecodeSelf(d) } } - if yyrt1454 { - for ; yyj1454 < yyl1454; yyj1454++ { - yyv1454 = append(yyv1454, PodSecurityPolicy{}) - yyh1454.ElemContainerState(yyj1454) + if yyrt1383 { + for ; yyj1383 < yyl1383; yyj1383++ { + yyv1383 = append(yyv1383, IDRange{}) + yyh1383.ElemContainerState(yyj1383) if r.TryDecodeAsNil() { - yyv1454[yyj1454] = PodSecurityPolicy{} + yyv1383[yyj1383] = IDRange{} } else { - yyv1456 := &yyv1454[yyj1454] - yyv1456.CodecDecodeSelf(d) + yyv1385 := &yyv1383[yyj1383] + yyv1385.CodecDecodeSelf(d) } } } } else { - yyj1454 := 0 - for ; !r.CheckBreak(); yyj1454++ { + yyj1383 := 0 + for ; !r.CheckBreak(); yyj1383++ { - if yyj1454 >= len(yyv1454) { - yyv1454 = append(yyv1454, PodSecurityPolicy{}) // var yyz1454 PodSecurityPolicy - yyc1454 = true + if yyj1383 >= len(yyv1383) { + yyv1383 = append(yyv1383, IDRange{}) // var yyz1383 IDRange + yyc1383 = true } - yyh1454.ElemContainerState(yyj1454) - if yyj1454 < len(yyv1454) { + yyh1383.ElemContainerState(yyj1383) + if yyj1383 < len(yyv1383) { if r.TryDecodeAsNil() { - yyv1454[yyj1454] = PodSecurityPolicy{} + yyv1383[yyj1383] = IDRange{} } else { - yyv1457 := &yyv1454[yyj1454] - yyv1457.CodecDecodeSelf(d) + yyv1386 := &yyv1383[yyj1383] + yyv1386.CodecDecodeSelf(d) } } else { @@ -18224,115 +17396,115 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co } } - if yyj1454 < len(yyv1454) { - yyv1454 = yyv1454[:yyj1454] - yyc1454 = true - } else if yyj1454 == 0 && yyv1454 == nil { - yyv1454 = []PodSecurityPolicy{} - yyc1454 = true + if yyj1383 < len(yyv1383) { + yyv1383 = yyv1383[:yyj1383] + yyc1383 = true + } else if yyj1383 == 0 && yyv1383 == nil { + yyv1383 = []IDRange{} + yyc1383 = true } } - yyh1454.End() - if yyc1454 { - *v = yyv1454 + yyh1383.End() + if yyc1383 { + *v = yyv1383 } } -func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngressRule, e *codec1978.Encoder) { +func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1458 := range v { + for _, yyv1387 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1459 := &yyv1458 - yy1459.CodecEncodeSelf(e) + yy1388 := &yyv1387 + yy1388.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) { +func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1460 := *v 
- yyh1460, yyl1460 := z.DecSliceHelperStart() - var yyc1460 bool - if yyl1460 == 0 { - if yyv1460 == nil { - yyv1460 = []NetworkPolicyIngressRule{} - yyc1460 = true - } else if len(yyv1460) != 0 { - yyv1460 = yyv1460[:0] - yyc1460 = true + yyv1389 := *v + yyh1389, yyl1389 := z.DecSliceHelperStart() + var yyc1389 bool + if yyl1389 == 0 { + if yyv1389 == nil { + yyv1389 = []PodSecurityPolicy{} + yyc1389 = true + } else if len(yyv1389) != 0 { + yyv1389 = yyv1389[:0] + yyc1389 = true } - } else if yyl1460 > 0 { - var yyrr1460, yyrl1460 int - var yyrt1460 bool - if yyl1460 > cap(yyv1460) { + } else if yyl1389 > 0 { + var yyrr1389, yyrl1389 int + var yyrt1389 bool + if yyl1389 > cap(yyv1389) { - yyrg1460 := len(yyv1460) > 0 - yyv21460 := yyv1460 - yyrl1460, yyrt1460 = z.DecInferLen(yyl1460, z.DecBasicHandle().MaxInitLen, 48) - if yyrt1460 { - if yyrl1460 <= cap(yyv1460) { - yyv1460 = yyv1460[:yyrl1460] + yyrg1389 := len(yyv1389) > 0 + yyv21389 := yyv1389 + yyrl1389, yyrt1389 = z.DecInferLen(yyl1389, z.DecBasicHandle().MaxInitLen, 552) + if yyrt1389 { + if yyrl1389 <= cap(yyv1389) { + yyv1389 = yyv1389[:yyrl1389] } else { - yyv1460 = make([]NetworkPolicyIngressRule, yyrl1460) + yyv1389 = make([]PodSecurityPolicy, yyrl1389) } } else { - yyv1460 = make([]NetworkPolicyIngressRule, yyrl1460) + yyv1389 = make([]PodSecurityPolicy, yyrl1389) } - yyc1460 = true - yyrr1460 = len(yyv1460) - if yyrg1460 { - copy(yyv1460, yyv21460) + yyc1389 = true + yyrr1389 = len(yyv1389) + if yyrg1389 { + copy(yyv1389, yyv21389) } - } else if yyl1460 != len(yyv1460) { - yyv1460 = yyv1460[:yyl1460] - yyc1460 = true + } else if yyl1389 != len(yyv1389) { + yyv1389 = yyv1389[:yyl1389] + yyc1389 = true } - yyj1460 := 0 - for ; yyj1460 < yyrr1460; yyj1460++ { - yyh1460.ElemContainerState(yyj1460) + yyj1389 := 0 + for ; yyj1389 < yyrr1389; yyj1389++ { + yyh1389.ElemContainerState(yyj1389) if r.TryDecodeAsNil() { - yyv1460[yyj1460] = NetworkPolicyIngressRule{} + yyv1389[yyj1389] = PodSecurityPolicy{} } else { - yyv1461 := &yyv1460[yyj1460] - yyv1461.CodecDecodeSelf(d) + yyv1390 := &yyv1389[yyj1389] + yyv1390.CodecDecodeSelf(d) } } - if yyrt1460 { - for ; yyj1460 < yyl1460; yyj1460++ { - yyv1460 = append(yyv1460, NetworkPolicyIngressRule{}) - yyh1460.ElemContainerState(yyj1460) + if yyrt1389 { + for ; yyj1389 < yyl1389; yyj1389++ { + yyv1389 = append(yyv1389, PodSecurityPolicy{}) + yyh1389.ElemContainerState(yyj1389) if r.TryDecodeAsNil() { - yyv1460[yyj1460] = NetworkPolicyIngressRule{} + yyv1389[yyj1389] = PodSecurityPolicy{} } else { - yyv1462 := &yyv1460[yyj1460] - yyv1462.CodecDecodeSelf(d) + yyv1391 := &yyv1389[yyj1389] + yyv1391.CodecDecodeSelf(d) } } } } else { - yyj1460 := 0 - for ; !r.CheckBreak(); yyj1460++ { + yyj1389 := 0 + for ; !r.CheckBreak(); yyj1389++ { - if yyj1460 >= len(yyv1460) { - yyv1460 = append(yyv1460, NetworkPolicyIngressRule{}) // var yyz1460 NetworkPolicyIngressRule - yyc1460 = true + if yyj1389 >= len(yyv1389) { + yyv1389 = append(yyv1389, PodSecurityPolicy{}) // var yyz1389 PodSecurityPolicy + yyc1389 = true } - yyh1460.ElemContainerState(yyj1460) - if yyj1460 < len(yyv1460) { + yyh1389.ElemContainerState(yyj1389) + if yyj1389 < len(yyv1389) { if r.TryDecodeAsNil() { - yyv1460[yyj1460] = NetworkPolicyIngressRule{} + yyv1389[yyj1389] = PodSecurityPolicy{} } else { - yyv1463 := &yyv1460[yyj1460] - yyv1463.CodecDecodeSelf(d) + yyv1392 := &yyv1389[yyj1389] + yyv1392.CodecDecodeSelf(d) } } else { @@ -18340,115 +17512,115 @@ func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngr 
} } - if yyj1460 < len(yyv1460) { - yyv1460 = yyv1460[:yyj1460] - yyc1460 = true - } else if yyj1460 == 0 && yyv1460 == nil { - yyv1460 = []NetworkPolicyIngressRule{} - yyc1460 = true + if yyj1389 < len(yyv1389) { + yyv1389 = yyv1389[:yyj1389] + yyc1389 = true + } else if yyj1389 == 0 && yyv1389 == nil { + yyv1389 = []PodSecurityPolicy{} + yyc1389 = true } } - yyh1460.End() - if yyc1460 { - *v = yyv1460 + yyh1389.End() + if yyc1389 { + *v = yyv1389 } } -func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngressRule, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1464 := range v { + for _, yyv1393 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1465 := &yyv1464 - yy1465.CodecEncodeSelf(e) + yy1394 := &yyv1393 + yy1394.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1466 := *v - yyh1466, yyl1466 := z.DecSliceHelperStart() - var yyc1466 bool - if yyl1466 == 0 { - if yyv1466 == nil { - yyv1466 = []NetworkPolicyPort{} - yyc1466 = true - } else if len(yyv1466) != 0 { - yyv1466 = yyv1466[:0] - yyc1466 = true + yyv1395 := *v + yyh1395, yyl1395 := z.DecSliceHelperStart() + var yyc1395 bool + if yyl1395 == 0 { + if yyv1395 == nil { + yyv1395 = []NetworkPolicyIngressRule{} + yyc1395 = true + } else if len(yyv1395) != 0 { + yyv1395 = yyv1395[:0] + yyc1395 = true } - } else if yyl1466 > 0 { - var yyrr1466, yyrl1466 int - var yyrt1466 bool - if yyl1466 > cap(yyv1466) { + } else if yyl1395 > 0 { + var yyrr1395, yyrl1395 int + var yyrt1395 bool + if yyl1395 > cap(yyv1395) { - yyrg1466 := len(yyv1466) > 0 - yyv21466 := yyv1466 - yyrl1466, yyrt1466 = z.DecInferLen(yyl1466, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1466 { - if yyrl1466 <= cap(yyv1466) { - yyv1466 = yyv1466[:yyrl1466] + yyrg1395 := len(yyv1395) > 0 + yyv21395 := yyv1395 + yyrl1395, yyrt1395 = z.DecInferLen(yyl1395, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1395 { + if yyrl1395 <= cap(yyv1395) { + yyv1395 = yyv1395[:yyrl1395] } else { - yyv1466 = make([]NetworkPolicyPort, yyrl1466) + yyv1395 = make([]NetworkPolicyIngressRule, yyrl1395) } } else { - yyv1466 = make([]NetworkPolicyPort, yyrl1466) + yyv1395 = make([]NetworkPolicyIngressRule, yyrl1395) } - yyc1466 = true - yyrr1466 = len(yyv1466) - if yyrg1466 { - copy(yyv1466, yyv21466) + yyc1395 = true + yyrr1395 = len(yyv1395) + if yyrg1395 { + copy(yyv1395, yyv21395) } - } else if yyl1466 != len(yyv1466) { - yyv1466 = yyv1466[:yyl1466] - yyc1466 = true + } else if yyl1395 != len(yyv1395) { + yyv1395 = yyv1395[:yyl1395] + yyc1395 = true } - yyj1466 := 0 - for ; yyj1466 < yyrr1466; yyj1466++ { - yyh1466.ElemContainerState(yyj1466) + yyj1395 := 0 + for ; yyj1395 < yyrr1395; yyj1395++ { + yyh1395.ElemContainerState(yyj1395) if r.TryDecodeAsNil() { - yyv1466[yyj1466] = NetworkPolicyPort{} + yyv1395[yyj1395] = NetworkPolicyIngressRule{} } else { - yyv1467 := &yyv1466[yyj1466] - yyv1467.CodecDecodeSelf(d) + yyv1396 := &yyv1395[yyj1395] + yyv1396.CodecDecodeSelf(d) } } - if yyrt1466 { - for ; yyj1466 < yyl1466; yyj1466++ { - yyv1466 
= append(yyv1466, NetworkPolicyPort{}) - yyh1466.ElemContainerState(yyj1466) + if yyrt1395 { + for ; yyj1395 < yyl1395; yyj1395++ { + yyv1395 = append(yyv1395, NetworkPolicyIngressRule{}) + yyh1395.ElemContainerState(yyj1395) if r.TryDecodeAsNil() { - yyv1466[yyj1466] = NetworkPolicyPort{} + yyv1395[yyj1395] = NetworkPolicyIngressRule{} } else { - yyv1468 := &yyv1466[yyj1466] - yyv1468.CodecDecodeSelf(d) + yyv1397 := &yyv1395[yyj1395] + yyv1397.CodecDecodeSelf(d) } } } } else { - yyj1466 := 0 - for ; !r.CheckBreak(); yyj1466++ { + yyj1395 := 0 + for ; !r.CheckBreak(); yyj1395++ { - if yyj1466 >= len(yyv1466) { - yyv1466 = append(yyv1466, NetworkPolicyPort{}) // var yyz1466 NetworkPolicyPort - yyc1466 = true + if yyj1395 >= len(yyv1395) { + yyv1395 = append(yyv1395, NetworkPolicyIngressRule{}) // var yyz1395 NetworkPolicyIngressRule + yyc1395 = true } - yyh1466.ElemContainerState(yyj1466) - if yyj1466 < len(yyv1466) { + yyh1395.ElemContainerState(yyj1395) + if yyj1395 < len(yyv1395) { if r.TryDecodeAsNil() { - yyv1466[yyj1466] = NetworkPolicyPort{} + yyv1395[yyj1395] = NetworkPolicyIngressRule{} } else { - yyv1469 := &yyv1466[yyj1466] - yyv1469.CodecDecodeSelf(d) + yyv1398 := &yyv1395[yyj1395] + yyv1398.CodecDecodeSelf(d) } } else { @@ -18456,115 +17628,115 @@ func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *co } } - if yyj1466 < len(yyv1466) { - yyv1466 = yyv1466[:yyj1466] - yyc1466 = true - } else if yyj1466 == 0 && yyv1466 == nil { - yyv1466 = []NetworkPolicyPort{} - yyc1466 = true + if yyj1395 < len(yyv1395) { + yyv1395 = yyv1395[:yyj1395] + yyc1395 = true + } else if yyj1395 == 0 && yyv1395 == nil { + yyv1395 = []NetworkPolicyIngressRule{} + yyc1395 = true } } - yyh1466.End() - if yyc1466 { - *v = yyv1466 + yyh1395.End() + if yyc1395 { + *v = yyv1395 } } -func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1470 := range v { + for _, yyv1399 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1471 := &yyv1470 - yy1471.CodecEncodeSelf(e) + yy1400 := &yyv1399 + yy1400.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1472 := *v - yyh1472, yyl1472 := z.DecSliceHelperStart() - var yyc1472 bool - if yyl1472 == 0 { - if yyv1472 == nil { - yyv1472 = []NetworkPolicyPeer{} - yyc1472 = true - } else if len(yyv1472) != 0 { - yyv1472 = yyv1472[:0] - yyc1472 = true + yyv1401 := *v + yyh1401, yyl1401 := z.DecSliceHelperStart() + var yyc1401 bool + if yyl1401 == 0 { + if yyv1401 == nil { + yyv1401 = []NetworkPolicyPort{} + yyc1401 = true + } else if len(yyv1401) != 0 { + yyv1401 = yyv1401[:0] + yyc1401 = true } - } else if yyl1472 > 0 { - var yyrr1472, yyrl1472 int - var yyrt1472 bool - if yyl1472 > cap(yyv1472) { + } else if yyl1401 > 0 { + var yyrr1401, yyrl1401 int + var yyrt1401 bool + if yyl1401 > cap(yyv1401) { - yyrg1472 := len(yyv1472) > 0 - yyv21472 := yyv1472 - yyrl1472, yyrt1472 = z.DecInferLen(yyl1472, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1472 { - if yyrl1472 <= 
cap(yyv1472) { - yyv1472 = yyv1472[:yyrl1472] + yyrg1401 := len(yyv1401) > 0 + yyv21401 := yyv1401 + yyrl1401, yyrt1401 = z.DecInferLen(yyl1401, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1401 { + if yyrl1401 <= cap(yyv1401) { + yyv1401 = yyv1401[:yyrl1401] } else { - yyv1472 = make([]NetworkPolicyPeer, yyrl1472) + yyv1401 = make([]NetworkPolicyPort, yyrl1401) } } else { - yyv1472 = make([]NetworkPolicyPeer, yyrl1472) + yyv1401 = make([]NetworkPolicyPort, yyrl1401) } - yyc1472 = true - yyrr1472 = len(yyv1472) - if yyrg1472 { - copy(yyv1472, yyv21472) + yyc1401 = true + yyrr1401 = len(yyv1401) + if yyrg1401 { + copy(yyv1401, yyv21401) } - } else if yyl1472 != len(yyv1472) { - yyv1472 = yyv1472[:yyl1472] - yyc1472 = true + } else if yyl1401 != len(yyv1401) { + yyv1401 = yyv1401[:yyl1401] + yyc1401 = true } - yyj1472 := 0 - for ; yyj1472 < yyrr1472; yyj1472++ { - yyh1472.ElemContainerState(yyj1472) + yyj1401 := 0 + for ; yyj1401 < yyrr1401; yyj1401++ { + yyh1401.ElemContainerState(yyj1401) if r.TryDecodeAsNil() { - yyv1472[yyj1472] = NetworkPolicyPeer{} + yyv1401[yyj1401] = NetworkPolicyPort{} } else { - yyv1473 := &yyv1472[yyj1472] - yyv1473.CodecDecodeSelf(d) + yyv1402 := &yyv1401[yyj1401] + yyv1402.CodecDecodeSelf(d) } } - if yyrt1472 { - for ; yyj1472 < yyl1472; yyj1472++ { - yyv1472 = append(yyv1472, NetworkPolicyPeer{}) - yyh1472.ElemContainerState(yyj1472) + if yyrt1401 { + for ; yyj1401 < yyl1401; yyj1401++ { + yyv1401 = append(yyv1401, NetworkPolicyPort{}) + yyh1401.ElemContainerState(yyj1401) if r.TryDecodeAsNil() { - yyv1472[yyj1472] = NetworkPolicyPeer{} + yyv1401[yyj1401] = NetworkPolicyPort{} } else { - yyv1474 := &yyv1472[yyj1472] - yyv1474.CodecDecodeSelf(d) + yyv1403 := &yyv1401[yyj1401] + yyv1403.CodecDecodeSelf(d) } } } } else { - yyj1472 := 0 - for ; !r.CheckBreak(); yyj1472++ { + yyj1401 := 0 + for ; !r.CheckBreak(); yyj1401++ { - if yyj1472 >= len(yyv1472) { - yyv1472 = append(yyv1472, NetworkPolicyPeer{}) // var yyz1472 NetworkPolicyPeer - yyc1472 = true + if yyj1401 >= len(yyv1401) { + yyv1401 = append(yyv1401, NetworkPolicyPort{}) // var yyz1401 NetworkPolicyPort + yyc1401 = true } - yyh1472.ElemContainerState(yyj1472) - if yyj1472 < len(yyv1472) { + yyh1401.ElemContainerState(yyj1401) + if yyj1401 < len(yyv1401) { if r.TryDecodeAsNil() { - yyv1472[yyj1472] = NetworkPolicyPeer{} + yyv1401[yyj1401] = NetworkPolicyPort{} } else { - yyv1475 := &yyv1472[yyj1472] - yyv1475.CodecDecodeSelf(d) + yyv1404 := &yyv1401[yyj1401] + yyv1404.CodecDecodeSelf(d) } } else { @@ -18572,115 +17744,115 @@ func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *co } } - if yyj1472 < len(yyv1472) { - yyv1472 = yyv1472[:yyj1472] - yyc1472 = true - } else if yyj1472 == 0 && yyv1472 == nil { - yyv1472 = []NetworkPolicyPeer{} - yyc1472 = true + if yyj1401 < len(yyv1401) { + yyv1401 = yyv1401[:yyj1401] + yyc1401 = true + } else if yyj1401 == 0 && yyv1401 == nil { + yyv1401 = []NetworkPolicyPort{} + yyc1401 = true } } - yyh1472.End() - if yyc1472 { - *v = yyv1472 + yyh1401.End() + if yyc1401 { + *v = yyv1401 } } -func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1476 := range v { + for _, yyv1405 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1477 := &yyv1476 - yy1477.CodecEncodeSelf(e) + 
yy1406 := &yyv1405 + yy1406.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1478 := *v - yyh1478, yyl1478 := z.DecSliceHelperStart() - var yyc1478 bool - if yyl1478 == 0 { - if yyv1478 == nil { - yyv1478 = []NetworkPolicy{} - yyc1478 = true - } else if len(yyv1478) != 0 { - yyv1478 = yyv1478[:0] - yyc1478 = true + yyv1407 := *v + yyh1407, yyl1407 := z.DecSliceHelperStart() + var yyc1407 bool + if yyl1407 == 0 { + if yyv1407 == nil { + yyv1407 = []NetworkPolicyPeer{} + yyc1407 = true + } else if len(yyv1407) != 0 { + yyv1407 = yyv1407[:0] + yyc1407 = true } - } else if yyl1478 > 0 { - var yyrr1478, yyrl1478 int - var yyrt1478 bool - if yyl1478 > cap(yyv1478) { + } else if yyl1407 > 0 { + var yyrr1407, yyrl1407 int + var yyrt1407 bool + if yyl1407 > cap(yyv1407) { - yyrg1478 := len(yyv1478) > 0 - yyv21478 := yyv1478 - yyrl1478, yyrt1478 = z.DecInferLen(yyl1478, z.DecBasicHandle().MaxInitLen, 312) - if yyrt1478 { - if yyrl1478 <= cap(yyv1478) { - yyv1478 = yyv1478[:yyrl1478] + yyrg1407 := len(yyv1407) > 0 + yyv21407 := yyv1407 + yyrl1407, yyrt1407 = z.DecInferLen(yyl1407, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1407 { + if yyrl1407 <= cap(yyv1407) { + yyv1407 = yyv1407[:yyrl1407] } else { - yyv1478 = make([]NetworkPolicy, yyrl1478) + yyv1407 = make([]NetworkPolicyPeer, yyrl1407) } } else { - yyv1478 = make([]NetworkPolicy, yyrl1478) + yyv1407 = make([]NetworkPolicyPeer, yyrl1407) } - yyc1478 = true - yyrr1478 = len(yyv1478) - if yyrg1478 { - copy(yyv1478, yyv21478) + yyc1407 = true + yyrr1407 = len(yyv1407) + if yyrg1407 { + copy(yyv1407, yyv21407) } - } else if yyl1478 != len(yyv1478) { - yyv1478 = yyv1478[:yyl1478] - yyc1478 = true + } else if yyl1407 != len(yyv1407) { + yyv1407 = yyv1407[:yyl1407] + yyc1407 = true } - yyj1478 := 0 - for ; yyj1478 < yyrr1478; yyj1478++ { - yyh1478.ElemContainerState(yyj1478) + yyj1407 := 0 + for ; yyj1407 < yyrr1407; yyj1407++ { + yyh1407.ElemContainerState(yyj1407) if r.TryDecodeAsNil() { - yyv1478[yyj1478] = NetworkPolicy{} + yyv1407[yyj1407] = NetworkPolicyPeer{} } else { - yyv1479 := &yyv1478[yyj1478] - yyv1479.CodecDecodeSelf(d) + yyv1408 := &yyv1407[yyj1407] + yyv1408.CodecDecodeSelf(d) } } - if yyrt1478 { - for ; yyj1478 < yyl1478; yyj1478++ { - yyv1478 = append(yyv1478, NetworkPolicy{}) - yyh1478.ElemContainerState(yyj1478) + if yyrt1407 { + for ; yyj1407 < yyl1407; yyj1407++ { + yyv1407 = append(yyv1407, NetworkPolicyPeer{}) + yyh1407.ElemContainerState(yyj1407) if r.TryDecodeAsNil() { - yyv1478[yyj1478] = NetworkPolicy{} + yyv1407[yyj1407] = NetworkPolicyPeer{} } else { - yyv1480 := &yyv1478[yyj1478] - yyv1480.CodecDecodeSelf(d) + yyv1409 := &yyv1407[yyj1407] + yyv1409.CodecDecodeSelf(d) } } } } else { - yyj1478 := 0 - for ; !r.CheckBreak(); yyj1478++ { + yyj1407 := 0 + for ; !r.CheckBreak(); yyj1407++ { - if yyj1478 >= len(yyv1478) { - yyv1478 = append(yyv1478, NetworkPolicy{}) // var yyz1478 NetworkPolicy - yyc1478 = true + if yyj1407 >= len(yyv1407) { + yyv1407 = append(yyv1407, NetworkPolicyPeer{}) // var yyz1407 NetworkPolicyPeer + yyc1407 = true } - yyh1478.ElemContainerState(yyj1478) - if yyj1478 < len(yyv1478) { + yyh1407.ElemContainerState(yyj1407) + if yyj1407 < len(yyv1407) { if r.TryDecodeAsNil() { - yyv1478[yyj1478] = 
NetworkPolicy{} + yyv1407[yyj1407] = NetworkPolicyPeer{} } else { - yyv1481 := &yyv1478[yyj1478] - yyv1481.CodecDecodeSelf(d) + yyv1410 := &yyv1407[yyj1407] + yyv1410.CodecDecodeSelf(d) } } else { @@ -18688,115 +17860,115 @@ func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978. } } - if yyj1478 < len(yyv1478) { - yyv1478 = yyv1478[:yyj1478] - yyc1478 = true - } else if yyj1478 == 0 && yyv1478 == nil { - yyv1478 = []NetworkPolicy{} - yyc1478 = true + if yyj1407 < len(yyv1407) { + yyv1407 = yyv1407[:yyj1407] + yyc1407 = true + } else if yyj1407 == 0 && yyv1407 == nil { + yyv1407 = []NetworkPolicyPeer{} + yyc1407 = true } } - yyh1478.End() - if yyc1478 { - *v = yyv1478 + yyh1407.End() + if yyc1407 { + *v = yyv1407 } } -func (x codecSelfer1234) encSliceStorageClass(v []StorageClass, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1482 := range v { + for _, yyv1411 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1483 := &yyv1482 - yy1483.CodecEncodeSelf(e) + yy1412 := &yyv1411 + yy1412.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1484 := *v - yyh1484, yyl1484 := z.DecSliceHelperStart() - var yyc1484 bool - if yyl1484 == 0 { - if yyv1484 == nil { - yyv1484 = []StorageClass{} - yyc1484 = true - } else if len(yyv1484) != 0 { - yyv1484 = yyv1484[:0] - yyc1484 = true + yyv1413 := *v + yyh1413, yyl1413 := z.DecSliceHelperStart() + var yyc1413 bool + if yyl1413 == 0 { + if yyv1413 == nil { + yyv1413 = []NetworkPolicy{} + yyc1413 = true + } else if len(yyv1413) != 0 { + yyv1413 = yyv1413[:0] + yyc1413 = true } - } else if yyl1484 > 0 { - var yyrr1484, yyrl1484 int - var yyrt1484 bool - if yyl1484 > cap(yyv1484) { + } else if yyl1413 > 0 { + var yyrr1413, yyrl1413 int + var yyrt1413 bool + if yyl1413 > cap(yyv1413) { - yyrg1484 := len(yyv1484) > 0 - yyv21484 := yyv1484 - yyrl1484, yyrt1484 = z.DecInferLen(yyl1484, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1484 { - if yyrl1484 <= cap(yyv1484) { - yyv1484 = yyv1484[:yyrl1484] + yyrg1413 := len(yyv1413) > 0 + yyv21413 := yyv1413 + yyrl1413, yyrt1413 = z.DecInferLen(yyl1413, z.DecBasicHandle().MaxInitLen, 312) + if yyrt1413 { + if yyrl1413 <= cap(yyv1413) { + yyv1413 = yyv1413[:yyrl1413] } else { - yyv1484 = make([]StorageClass, yyrl1484) + yyv1413 = make([]NetworkPolicy, yyrl1413) } } else { - yyv1484 = make([]StorageClass, yyrl1484) + yyv1413 = make([]NetworkPolicy, yyrl1413) } - yyc1484 = true - yyrr1484 = len(yyv1484) - if yyrg1484 { - copy(yyv1484, yyv21484) + yyc1413 = true + yyrr1413 = len(yyv1413) + if yyrg1413 { + copy(yyv1413, yyv21413) } - } else if yyl1484 != len(yyv1484) { - yyv1484 = yyv1484[:yyl1484] - yyc1484 = true + } else if yyl1413 != len(yyv1413) { + yyv1413 = yyv1413[:yyl1413] + yyc1413 = true } - yyj1484 := 0 - for ; yyj1484 < yyrr1484; yyj1484++ { - yyh1484.ElemContainerState(yyj1484) + yyj1413 := 0 + for ; yyj1413 < yyrr1413; yyj1413++ { + yyh1413.ElemContainerState(yyj1413) if r.TryDecodeAsNil() { - yyv1484[yyj1484] = StorageClass{} + yyv1413[yyj1413] = NetworkPolicy{} } else { - yyv1485 := 
&yyv1484[yyj1484] - yyv1485.CodecDecodeSelf(d) + yyv1414 := &yyv1413[yyj1413] + yyv1414.CodecDecodeSelf(d) } } - if yyrt1484 { - for ; yyj1484 < yyl1484; yyj1484++ { - yyv1484 = append(yyv1484, StorageClass{}) - yyh1484.ElemContainerState(yyj1484) + if yyrt1413 { + for ; yyj1413 < yyl1413; yyj1413++ { + yyv1413 = append(yyv1413, NetworkPolicy{}) + yyh1413.ElemContainerState(yyj1413) if r.TryDecodeAsNil() { - yyv1484[yyj1484] = StorageClass{} + yyv1413[yyj1413] = NetworkPolicy{} } else { - yyv1486 := &yyv1484[yyj1484] - yyv1486.CodecDecodeSelf(d) + yyv1415 := &yyv1413[yyj1413] + yyv1415.CodecDecodeSelf(d) } } } } else { - yyj1484 := 0 - for ; !r.CheckBreak(); yyj1484++ { + yyj1413 := 0 + for ; !r.CheckBreak(); yyj1413++ { - if yyj1484 >= len(yyv1484) { - yyv1484 = append(yyv1484, StorageClass{}) // var yyz1484 StorageClass - yyc1484 = true + if yyj1413 >= len(yyv1413) { + yyv1413 = append(yyv1413, NetworkPolicy{}) // var yyz1413 NetworkPolicy + yyc1413 = true } - yyh1484.ElemContainerState(yyj1484) - if yyj1484 < len(yyv1484) { + yyh1413.ElemContainerState(yyj1413) + if yyj1413 < len(yyv1413) { if r.TryDecodeAsNil() { - yyv1484[yyj1484] = StorageClass{} + yyv1413[yyj1413] = NetworkPolicy{} } else { - yyv1487 := &yyv1484[yyj1484] - yyv1487.CodecDecodeSelf(d) + yyv1416 := &yyv1413[yyj1413] + yyv1416.CodecDecodeSelf(d) } } else { @@ -18804,16 +17976,16 @@ func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.De } } - if yyj1484 < len(yyv1484) { - yyv1484 = yyv1484[:yyj1484] - yyc1484 = true - } else if yyj1484 == 0 && yyv1484 == nil { - yyv1484 = []StorageClass{} - yyc1484 = true + if yyj1413 < len(yyv1413) { + yyv1413 = yyv1413[:yyj1413] + yyc1413 = true + } else if yyj1413 == 0 && yyv1413 == nil { + yyv1413 = []NetworkPolicy{} + yyc1413 = true } } - yyh1484.End() - if yyc1484 { - *v = yyv1484 + yyh1413.End() + if yyc1413 { + *v = yyv1413 } } diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/types.go b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/types.go index 50f9ad418832..864e349bf27f 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/types.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/types.go @@ -54,7 +54,7 @@ type ScaleStatus struct { Replicas int32 `json:"replicas"` // label query over pods that should match the replicas count. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector *unversioned.LabelSelector `json:"selector,omitempty"` } @@ -64,13 +64,13 @@ type ScaleStatus struct { // represents a scaling request for a resource. type Scale struct { unversioned.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata. api.ObjectMeta `json:"metadata,omitempty"` - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Spec ScaleSpec `json:"spec,omitempty"` - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. 
Read-only. Status ScaleStatus `json:"status,omitempty"` } @@ -339,14 +339,14 @@ type DaemonSetSpec struct { // Selector is a label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector *unversioned.LabelSelector `json:"selector,omitempty"` // Template is the object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template Template api.PodTemplateSpec `json:"template"` // TODO(madhusudancs): Uncomment while implementing DaemonSet updates. @@ -393,18 +393,18 @@ type DaemonSetStatus struct { type DaemonSet struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata api.ObjectMeta `json:"metadata,omitempty"` // Spec defines the desired behavior of this daemon set. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec DaemonSetSpec `json:"spec,omitempty"` // Status is the current status of this daemon set. This data may be // out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status DaemonSetStatus `json:"status,omitempty"` } @@ -412,7 +412,7 @@ type DaemonSet struct { type DaemonSetList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty"` // Items is a list of daemon sets. @@ -422,7 +422,7 @@ type DaemonSetList struct { type ThirdPartyResourceDataList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty"` // Items is a list of third party objects Items []ThirdPartyResourceData `json:"items"` @@ -437,15 +437,15 @@ type ThirdPartyResourceDataList struct { type Ingress struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata api.ObjectMeta `json:"metadata,omitempty"` // Spec is the desired state of the Ingress. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec IngressSpec `json:"spec,omitempty"` // Status is the current state of the Ingress. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status IngressStatus `json:"status,omitempty"` } @@ -453,7 +453,7 @@ type Ingress struct { type IngressList struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty"` // Items is the list of Ingress. @@ -613,7 +613,7 @@ type ReplicaSetSpec struct { // Selector is a label query over pods that should match the replica count. // Must match in order to be controlled. // If empty, defaulted to labels on pod template. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector *unversioned.LabelSelector `json:"selector,omitempty"` // Template is the object that describes the pod that will be created if @@ -734,7 +734,7 @@ type SELinuxStrategyOptions struct { // Rule is the strategy that will dictate the allowable labels that may be set. Rule SELinuxStrategy `json:"rule"` // seLinuxOptions required to run as; required for MustRunAs - // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context + // More info: http://releases.k8s.io/release-1.4/docs/design/security_context.md#security-context SELinuxOptions *api.SELinuxOptions `json:"seLinuxOptions,omitempty"` } @@ -911,41 +911,3 @@ type NetworkPolicyList struct { Items []NetworkPolicy `json:"items"` } - -// +genclient=true -// +nonNamespaced=true - -// StorageClass describes a named "class" of storage offered in a cluster. -// Different classes might map to quality-of-service levels, or to backup policies, -// or to arbitrary policies determined by the cluster administrators. Kubernetes -// itself is unopinionated about what classes represent. This concept is sometimes -// called "profiles" in other storage systems. -// The name of a StorageClass object is significant, and is how users can request a particular class. -type StorageClass struct { - unversioned.TypeMeta `json:",inline"` - api.ObjectMeta `json:"metadata,omitempty"` - - // provisioner is the driver expected to handle this StorageClass. - // This is an optionally-prefixed name, like a label key. - // For example: "kubernetes.io/gce-pd" or "kubernetes.io/aws-ebs". - // This value may not be empty. - Provisioner string `json:"provisioner"` - - // parameters holds parameters for the provisioner. - // These values are opaque to the system and are passed directly - // to the provisioner. The only validation done on keys is that they are - // not empty. The maximum number of parameters is - // 512, with a cumulative max size of 256K - Parameters map[string]string `json:"parameters,omitempty"` -} - -// StorageClassList is a collection of storage classes. 
-type StorageClassList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty"` - - // Items is the list of StorageClasses - Items []StorageClass `json:"items"` -} diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/generated.pb.go index ad857c3c3b7e..1f231f96242e 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/generated.pb.go @@ -88,8 +88,6 @@ limitations under the License. Scale ScaleSpec ScaleStatus - StorageClass - StorageClassList SubresourceReference SupplementalGroupsStrategyOptions ThirdPartyResource @@ -397,41 +395,33 @@ func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } - -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } - func (m *SubresourceReference) Reset() { *m = SubresourceReference{} } func (*SubresourceReference) ProtoMessage() {} -func (*SubresourceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } +func (*SubresourceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{66} + return fileDescriptorGenerated, []int{64} } func (m *ThirdPartyResource) Reset() { *m = ThirdPartyResource{} } func (*ThirdPartyResource) ProtoMessage() {} -func (*ThirdPartyResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (*ThirdPartyResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } func (m *ThirdPartyResourceData) Reset() { *m = ThirdPartyResourceData{} } func (*ThirdPartyResourceData) ProtoMessage() {} -func (*ThirdPartyResourceData) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (*ThirdPartyResourceData) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } func (m *ThirdPartyResourceDataList) Reset() { *m = ThirdPartyResourceDataList{} } func (*ThirdPartyResourceDataList) ProtoMessage() {} func (*ThirdPartyResourceDataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{69} + return fileDescriptorGenerated, []int{67} } func (m *ThirdPartyResourceList) Reset() { *m = ThirdPartyResourceList{} } func (*ThirdPartyResourceList) ProtoMessage() {} -func (*ThirdPartyResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (*ThirdPartyResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } func init() { proto.RegisterType((*APIVersion)(nil), "k8s.io.client-go.1.4.pkg.apis.extensions.v1beta1.APIVersion") @@ -497,8 +487,6 @@ func init() 
{ proto.RegisterType((*Scale)(nil), "k8s.io.client-go.1.4.pkg.apis.extensions.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.client-go.1.4.pkg.apis.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.client-go.1.4.pkg.apis.extensions.v1beta1.ScaleStatus") - proto.RegisterType((*StorageClass)(nil), "k8s.io.client-go.1.4.pkg.apis.extensions.v1beta1.StorageClass") - proto.RegisterType((*StorageClassList)(nil), "k8s.io.client-go.1.4.pkg.apis.extensions.v1beta1.StorageClassList") proto.RegisterType((*SubresourceReference)(nil), "k8s.io.client-go.1.4.pkg.apis.extensions.v1beta1.SubresourceReference") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.client-go.1.4.pkg.apis.extensions.v1beta1.SupplementalGroupsStrategyOptions") proto.RegisterType((*ThirdPartyResource)(nil), "k8s.io.client-go.1.4.pkg.apis.extensions.v1beta1.ThirdPartyResource") @@ -2944,91 +2932,6 @@ func (m *ScaleStatus) MarshalTo(data []byte) (int, error) { return i, nil } -func (m *StorageClass) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *StorageClass) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n72, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n72 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Provisioner))) - i += copy(data[i:], m.Provisioner) - if len(m.Parameters) > 0 { - for k := range m.Parameters { - data[i] = 0x1a - i++ - v := m.Parameters[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(data, i, uint64(mapSize)) - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(v))) - i += copy(data[i:], v) - } - } - return i, nil -} - -func (m *StorageClassList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *StorageClassList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n73, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n73 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - func (m *SubresourceReference) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -3115,11 +3018,11 @@ func (m *ThirdPartyResource) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n74, err := m.ObjectMeta.MarshalTo(data[i:]) + n72, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n74 + i += n72 data[i] = 0x12 i++ i = encodeVarintGenerated(data, i, uint64(len(m.Description))) @@ -3157,11 +3060,11 @@ func (m *ThirdPartyResourceData) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - 
n75, err := m.ObjectMeta.MarshalTo(data[i:]) + n73, err := m.ObjectMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n75 + i += n73 if m.Data != nil { data[i] = 0x12 i++ @@ -3189,11 +3092,11 @@ func (m *ThirdPartyResourceDataList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n76, err := m.ListMeta.MarshalTo(data[i:]) + n74, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n76 + i += n74 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -3227,11 +3130,11 @@ func (m *ThirdPartyResourceList) MarshalTo(data []byte) (int, error) { data[i] = 0xa i++ i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n77, err := m.ListMeta.MarshalTo(data[i:]) + n75, err := m.ListMeta.MarshalTo(data[i:]) if err != nil { return 0, err } - i += n77 + i += n75 if len(m.Items) > 0 { for _, msg := range m.Items { data[i] = 0x12 @@ -4133,38 +4036,6 @@ func (m *ScaleStatus) Size() (n int) { return n } -func (m *StorageClass) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Provisioner) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Parameters) > 0 { - for k, v := range m.Parameters { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *StorageClassList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *SubresourceReference) Size() (n int) { var l int _ = l @@ -5030,39 +4901,6 @@ func (this *ScaleStatus) String() string { }, "") return s } -func (this *StorageClass) String() string { - if this == nil { - return "nil" - } - keysForParameters := make([]string, 0, len(this.Parameters)) - for k := range this.Parameters { - keysForParameters = append(keysForParameters, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - mapStringForParameters := "map[string]string{" - for _, k := range keysForParameters { - mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) - } - mapStringForParameters += "}" - s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, - `Parameters:` + mapStringForParameters + `,`, - `}`, - }, "") - return s -} -func (this *StorageClassList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *SubresourceReference) String() string { if this == nil { return "nil" @@ -13214,337 +13052,6 @@ func (m *ScaleStatus) Unmarshal(data []byte) error { } return nil } -func (m *StorageClass) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provisioner = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Parameters == nil { - m.Parameters = make(map[string]string) - } - m.Parameters[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StorageClassList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, StorageClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *SubresourceReference) Unmarshal(data []byte) error { l := len(data) iNdEx := 0 @@ -14400,256 +13907,251 @@ var ( ) var fileDescriptorGenerated = []byte{ - // 4015 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 
0x88, 0x02, 0xff, 0xe4, 0x5c, 0xdb, 0x8f, 0x1c, 0x47, - 0x57, 0xcf, 0xcc, 0xec, 0x65, 0xa6, 0xf6, 0xea, 0xf2, 0xda, 0x9e, 0x6c, 0x12, 0x3b, 0xe9, 0x88, - 0x90, 0x88, 0x78, 0x16, 0x9b, 0x24, 0x38, 0x76, 0xe2, 0x64, 0x67, 0x2f, 0xb6, 0x93, 0x5d, 0x7b, - 0x52, 0xb3, 0x76, 0x42, 0xee, 0xbd, 0x33, 0xb5, 0xb3, 0xed, 0xed, 0x99, 0x9e, 0xf4, 0x65, 0xbd, - 0x13, 0x84, 0x12, 0x04, 0x48, 0xbc, 0x24, 0xe4, 0x8d, 0x48, 0xc0, 0x03, 0x12, 0x88, 0x07, 0x44, - 0x04, 0x12, 0x52, 0x1e, 0x78, 0x01, 0x24, 0x84, 0x79, 0x40, 0x84, 0x9b, 0xe0, 0x85, 0x04, 0x82, - 0x20, 0xfa, 0xfe, 0x85, 0x7c, 0x9f, 0xf4, 0x7d, 0xa7, 0xaa, 0xab, 0xbb, 0xab, 0x7a, 0xba, 0xc7, - 0xee, 0xd9, 0x8b, 0xf4, 0xe9, 0x7b, 0x58, 0xc5, 0x5d, 0xe7, 0x9c, 0xdf, 0x39, 0x55, 0x75, 0xea, - 0x9c, 0x53, 0x97, 0x09, 0x7a, 0x71, 0xe7, 0x82, 0x53, 0x31, 0xac, 0x85, 0x1d, 0x6f, 0x93, 0xda, - 0x1d, 0xea, 0x52, 0x67, 0xa1, 0xbb, 0xd3, 0x5a, 0xd0, 0xbb, 0x86, 0xb3, 0x40, 0xf7, 0x5c, 0xda, - 0x71, 0x0c, 0xab, 0xe3, 0x2c, 0xec, 0x9e, 0xdb, 0xa4, 0xae, 0x7e, 0x6e, 0xa1, 0x45, 0x3b, 0xd4, - 0xd6, 0x5d, 0xda, 0xac, 0x74, 0x6d, 0xcb, 0xb5, 0xf0, 0x59, 0x5f, 0xbc, 0x12, 0x89, 0x57, 0x40, - 0xbc, 0xc2, 0xc4, 0x2b, 0x91, 0x78, 0x45, 0x88, 0xcf, 0x9f, 0x6d, 0x19, 0xee, 0xb6, 0xb7, 0x59, - 0x69, 0x58, 0xed, 0x85, 0x96, 0xd5, 0xb2, 0x16, 0x38, 0xca, 0xa6, 0xb7, 0xc5, 0xbf, 0xf8, 0x07, - 0xff, 0x97, 0x8f, 0x3e, 0x7f, 0x3e, 0xd5, 0xb8, 0x05, 0x9b, 0x3a, 0x96, 0x67, 0x37, 0x68, 0xdc, - 0xa2, 0xf9, 0x67, 0xd3, 0x65, 0xbc, 0xce, 0x2e, 0xb5, 0x99, 0x41, 0xb4, 0xd9, 0x27, 0xf6, 0x74, - 0xba, 0xd8, 0x6e, 0x5f, 0xb7, 0xe7, 0xcf, 0x26, 0x73, 0xdb, 0x5e, 0xc7, 0x35, 0xda, 0xfd, 0x36, - 0x9d, 0x4b, 0x66, 0xf7, 0x5c, 0xc3, 0x5c, 0x30, 0x3a, 0xae, 0xe3, 0xda, 0x71, 0x11, 0xad, 0x82, - 0xd0, 0x62, 0xed, 0xda, 0x2d, 0xdf, 0x5e, 0xfc, 0x28, 0x1a, 0xe9, 0xe8, 0x6d, 0x5a, 0xce, 0x3d, - 0x9a, 0x7b, 0xb2, 0x54, 0x9d, 0xbc, 0xfb, 0xf5, 0x99, 0x07, 0xbe, 0xfd, 0xfa, 0xcc, 0xc8, 0x75, - 0x68, 0x23, 0x9c, 0xa2, 0xbd, 0x8d, 0xe6, 0x96, 0x6a, 0x37, 0x37, 0x74, 0xbb, 0x45, 0xdd, 0x9b, - 0x80, 0x6b, 0x7c, 0xa8, 0xbb, 0x4c, 0x72, 0x19, 0xcd, 0xba, 0xbc, 0xb1, 0x46, 0x61, 0xb4, 0x3a, - 0xae, 0xde, 0xf2, 0x51, 0x46, 0xab, 0x65, 0x81, 0x32, 0xbb, 0x11, 0xa3, 0x93, 0x3e, 0x09, 0xed, - 0x77, 0x73, 0xe8, 0xc1, 0x25, 0xcf, 0x71, 0xad, 0xf6, 0x3a, 0x75, 0x6d, 0xa3, 0xb1, 0xe4, 0xd9, - 0x36, 0x90, 0xea, 0xae, 0xee, 0x7a, 0xce, 0xbd, 0xad, 0xc3, 0x6f, 0xa0, 0xd1, 0x5d, 0xdd, 0xf4, - 0x68, 0x39, 0x0f, 0x2c, 0x13, 0xe7, 0x9f, 0xae, 0xa4, 0xba, 0x4d, 0x25, 0x98, 0xd8, 0xca, 0x6b, - 0x9e, 0x0e, 0xa3, 0xe9, 0xf6, 0xaa, 0x73, 0x02, 0x70, 0x52, 0x68, 0xbd, 0xc5, 0x90, 0x88, 0x0f, - 0xa8, 0x7d, 0x9a, 0x43, 0x8f, 0xa4, 0x5a, 0xb6, 0x66, 0x38, 0x2e, 0x6e, 0xa3, 0x51, 0xc3, 0xa5, - 0x6d, 0x07, 0xcc, 0x2b, 0x80, 0xee, 0xab, 0x95, 0x4c, 0x2e, 0x5b, 0x49, 0x05, 0xaf, 0x4e, 0x09, - 0xbb, 0x46, 0xaf, 0x31, 0x78, 0xe2, 0x6b, 0xd1, 0x7e, 0x27, 0x87, 0xb0, 0x2c, 0xe3, 0x8f, 0xee, - 0x7d, 0x8c, 0xd1, 0xeb, 0xfb, 0x19, 0xa3, 0xe3, 0x02, 0x70, 0xc2, 0x57, 0xa7, 0x0c, 0xd1, 0xc7, - 0x39, 0x74, 0xb2, 0xdf, 0x22, 0x3e, 0x36, 0x5b, 0xea, 0xd8, 0x2c, 0xee, 0x63, 0x6c, 0x7c, 0xd4, - 0x94, 0x41, 0xf9, 0xb3, 0x3c, 0x2a, 0x2d, 0xeb, 0xb4, 0x6d, 0x75, 0xea, 0x30, 0x16, 0x6f, 0xa0, - 0x62, 0x1b, 0xe4, 0x9b, 0xba, 0xab, 0xf3, 0xf1, 0x98, 0x38, 0xff, 0xe4, 0x80, 0xce, 0xee, 0x9e, - 0xab, 0xdc, 0xd8, 0xbc, 0x4d, 0x1b, 0x2e, 0xe8, 0xd1, 0xab, 0x58, 0xe0, 0xa3, 0xa8, 0x8d, 0x84, - 0x68, 0xf8, 0x5d, 0x34, 0xe2, 0x74, 0x69, 0x43, 0x0c, 0xe1, 0x0b, 0x19, 0xbb, 0x13, 0x5a, 0x58, - 0x07, 0x8c, 0x68, 0x8e, 0xd8, 0x17, 0xe1, 0xb8, 0x30, 0x5e, 0x63, 
0x0e, 0x9f, 0xfc, 0x72, 0x81, - 0x6b, 0xb8, 0x3c, 0xb4, 0x06, 0xdf, 0x85, 0xa6, 0x85, 0x8e, 0x31, 0xff, 0x9b, 0x08, 0x74, 0xed, - 0x1f, 0x73, 0x68, 0x2a, 0xe4, 0xe5, 0x33, 0xf5, 0x4e, 0xdf, 0x98, 0x2d, 0x0c, 0x18, 0x33, 0x29, - 0xd2, 0x55, 0x98, 0x38, 0x1f, 0xba, 0x59, 0xa1, 0xac, 0x18, 0xb4, 0x48, 0x03, 0xf7, 0x4e, 0xe0, - 0x08, 0x79, 0xee, 0x08, 0x17, 0x86, 0xed, 0x57, 0xca, 0xfc, 0xff, 0x9b, 0xdc, 0x9f, 0xba, 0x3f, - 0x92, 0x45, 0x87, 0x9a, 0x30, 0x83, 0x96, 0x2d, 0xfa, 0x93, 0x75, 0xb6, 0xd6, 0xf4, 0x4d, 0x6a, - 0xd6, 0x05, 0x46, 0x75, 0x92, 0x75, 0x2c, 0xf8, 0x22, 0x21, 0x36, 0x7e, 0x0b, 0x15, 0xc1, 0x82, - 0xae, 0x09, 0xa1, 0x55, 0x78, 0xc5, 0xd9, 0xc1, 0xbe, 0x56, 0xb3, 0x9a, 0x1b, 0x42, 0x80, 0xbb, - 0x41, 0x38, 0x6a, 0x41, 0x2b, 0x09, 0x01, 0xb5, 0x4f, 0xf2, 0x68, 0x26, 0x36, 0xa5, 0xf8, 0x16, - 0x3a, 0xd9, 0xf0, 0xc3, 0xc4, 0x75, 0xaf, 0x0d, 0x0a, 0xea, 0x8d, 0x6d, 0xda, 0xf4, 0x4c, 0xda, - 0x14, 0x61, 0xf7, 0xb4, 0xc0, 0x3b, 0xb9, 0x94, 0xc8, 0x45, 0x52, 0xa4, 0xf1, 0x2b, 0x08, 0x77, - 0x78, 0xd3, 0xba, 0xe1, 0x38, 0x21, 0x66, 0x9e, 0x63, 0xce, 0x0b, 0x4c, 0x7c, 0xbd, 0x8f, 0x83, - 0x24, 0x48, 0x31, 0x1b, 0x9b, 0xd4, 0x31, 0x6c, 0xda, 0x8c, 0xdb, 0x58, 0x50, 0x6d, 0x5c, 0x4e, - 0xe4, 0x22, 0x29, 0xd2, 0xda, 0x9f, 0xe7, 0x11, 0x5a, 0xa6, 0x5d, 0xd3, 0xea, 0xb5, 0xa1, 0x07, - 0x87, 0xb8, 0xce, 0xdf, 0x53, 0xd6, 0xf9, 0x8b, 0x59, 0xbd, 0x35, 0x34, 0x31, 0x75, 0xa1, 0xb7, - 0x62, 0x0b, 0xfd, 0xa5, 0xe1, 0x55, 0x0c, 0x5e, 0xe9, 0xff, 0x94, 0x43, 0xd3, 0x11, 0xf3, 0x51, - 0x2c, 0xf5, 0x77, 0xd5, 0xa5, 0xfe, 0xfc, 0xd0, 0x3d, 0x4b, 0x59, 0xeb, 0x9f, 0x17, 0x10, 0x8e, - 0x98, 0x88, 0x65, 0x9a, 0x9b, 0x7a, 0x63, 0xe7, 0x3e, 0x12, 0xe0, 0x1f, 0x43, 0xe6, 0xf4, 0xba, - 0x4d, 0x56, 0x04, 0x2d, 0x76, 0x3a, 0x96, 0xcb, 0x0b, 0x98, 0xc0, 0xcc, 0x5f, 0x19, 0xda, 0xcc, - 0xc0, 0x82, 0xca, 0xcd, 0x3e, 0xec, 0x95, 0x8e, 0x6b, 0xf7, 0xa2, 0xd5, 0xd3, 0xcf, 0x40, 0x12, - 0x0c, 0xc2, 0x1f, 0x20, 0x64, 0x0b, 0xcc, 0x0d, 0x4b, 0xf8, 0x47, 0x56, 0x17, 0x0c, 0x8c, 0x5a, - 0xb2, 0x3a, 0x5b, 0x46, 0x2b, 0xf2, 0x76, 0x12, 0x02, 0x13, 0x49, 0xc9, 0xfc, 0x0a, 0x3a, 0x95, - 0x62, 0x3d, 0x9e, 0x45, 0x85, 0x1d, 0xda, 0xf3, 0x87, 0x95, 0xb0, 0x7f, 0xe2, 0x39, 0xb9, 0x90, - 0x28, 0x89, 0x2a, 0xe0, 0x62, 0xfe, 0x42, 0x4e, 0xfb, 0x7c, 0x54, 0x76, 0x36, 0x1e, 0x87, 0x9f, - 0x44, 0x45, 0x1b, 0x5a, 0x8c, 0x86, 0xee, 0x88, 0x00, 0xc5, 0x23, 0x29, 0x11, 0x6d, 0x24, 0xa4, - 0x2a, 0x11, 0x3b, 0x7f, 0x44, 0x11, 0xbb, 0x70, 0xc0, 0x11, 0x1b, 0x5b, 0xd0, 0x09, 0x97, 0xd5, - 0xd9, 0xad, 0x5e, 0x79, 0x84, 0x83, 0x2f, 0xee, 0x63, 0x65, 0xfb, 0x40, 0x91, 0xc2, 0xa0, 0x85, - 0x84, 0x4a, 0xf0, 0x22, 0x9a, 0x69, 0x1b, 0x1d, 0x42, 0xf5, 0x66, 0xaf, 0x4e, 0x1b, 0x56, 0xa7, - 0xe9, 0x94, 0x47, 0xf9, 0x30, 0x9f, 0x12, 0x42, 0x33, 0xeb, 0x2a, 0x99, 0xc4, 0xf9, 0xf1, 0x1a, - 0x9a, 0xb3, 0xe9, 0xae, 0xc1, 0xcc, 0xb8, 0x0a, 0xcb, 0xd9, 0xb2, 0x7b, 0x6b, 0x46, 0xdb, 0x70, - 0xcb, 0x63, 0x7e, 0x19, 0x0f, 0x18, 0x73, 0x24, 0x81, 0x4e, 0x12, 0xa5, 0xf0, 0x13, 0x68, 0xac, - 0xab, 0x7b, 0x0e, 0xc4, 0xfa, 0x71, 0x90, 0x2f, 0x46, 0x81, 0xa9, 0xc6, 0x5b, 0x89, 0xa0, 0x42, - 0xd9, 0x2c, 0x7b, 0x79, 0xf1, 0x20, 0xbc, 0x7c, 0x3a, 0xdd, 0xc3, 0xb5, 0xef, 0xf2, 0x68, 0x36, - 0x1e, 0x34, 0x59, 0xce, 0xb3, 0x36, 0x1d, 0x6a, 0xef, 0xd2, 0xe6, 0x15, 0x7f, 0x7f, 0x04, 0xf0, - 0xdc, 0x4d, 0x0b, 0xd1, 0xaa, 0xbd, 0xd1, 0xc7, 0x41, 0x12, 0xa4, 0xf0, 0xd3, 0x92, 0xa3, 0xfb, - 0x59, 0x33, 0x9c, 0xb6, 0x04, 0x67, 0x87, 0x69, 0x13, 0x2b, 0x3f, 0x20, 0x8a, 0xd4, 0x18, 0x4e, - 0xdb, 0x4d, 0x95, 0x4c, 0xe2, 0xfc, 0xf8, 0x0a, 0x3a, 0xa6, 0xef, 0xea, 0x86, 0xa9, 0x6f, 
0x9a, - 0x34, 0x04, 0x19, 0xe1, 0x20, 0x0f, 0x0a, 0x90, 0x63, 0x8b, 0x71, 0x06, 0xd2, 0x2f, 0x83, 0xd7, - 0xd1, 0x71, 0xaf, 0xd3, 0x0f, 0xe5, 0xbb, 0xd1, 0x43, 0x02, 0xea, 0xf8, 0xcd, 0x7e, 0x16, 0x92, - 0x24, 0xa7, 0xfd, 0x73, 0x4e, 0x8e, 0xcf, 0x81, 0xcb, 0xe2, 0x8b, 0x68, 0xc4, 0xed, 0x75, 0x83, - 0xf8, 0xfc, 0x44, 0x10, 0x9f, 0x37, 0xa0, 0xed, 0x7b, 0x5e, 0x09, 0xc4, 0x25, 0x18, 0x85, 0x70, - 0x19, 0xfc, 0x11, 0x9a, 0x62, 0x53, 0x69, 0x74, 0x5a, 0xfe, 0xa8, 0x88, 0xf8, 0xb0, 0x3a, 0x84, - 0xbb, 0x84, 0x18, 0x52, 0x9e, 0x39, 0x06, 0x86, 0x4c, 0x29, 0x44, 0xa2, 0xea, 0x83, 0xdd, 0xef, - 0xd4, 0xca, 0x5e, 0xd7, 0xb2, 0xdd, 0x1b, 0x5d, 0x3f, 0x46, 0x83, 0x97, 0x53, 0xde, 0xc0, 0xfb, - 0x23, 0x79, 0xb9, 0xcf, 0x46, 0x04, 0x15, 0x3f, 0x8e, 0x46, 0xe9, 0x9e, 0xde, 0x70, 0xb9, 0xc5, - 0xc5, 0x28, 0xa3, 0xad, 0xb0, 0x46, 0xe2, 0xd3, 0xb4, 0xbf, 0x80, 0x0d, 0xd4, 0x6a, 0xfd, 0x8a, - 0x6d, 0x79, 0xdd, 0xa0, 0xf3, 0x81, 0x9e, 0x5f, 0x46, 0x23, 0x36, 0x94, 0x3e, 0x62, 0xd4, 0x1e, - 0x0f, 0x46, 0x8d, 0x40, 0x1b, 0x8c, 0xda, 0xf1, 0x98, 0x94, 0x3f, 0x64, 0x4c, 0x00, 0xb2, 0xf0, - 0x98, 0xad, 0x77, 0x5a, 0x34, 0xc8, 0x6f, 0xcf, 0x65, 0x1c, 0xab, 0x6b, 0xcb, 0x84, 0x89, 0x47, - 0x1d, 0xe3, 0x9f, 0x50, 0x57, 0xf8, 0xa8, 0xda, 0x1f, 0xe4, 0xd0, 0xcc, 0xd5, 0x8d, 0x8d, 0xda, - 0xb5, 0x4e, 0x0b, 0x76, 0x8b, 0x4e, 0x4d, 0x77, 0xb7, 0x59, 0x0a, 0xee, 0xc2, 0x7f, 0xe3, 0x29, - 0x98, 0xd1, 0x08, 0xa7, 0xe0, 0x6d, 0x34, 0xce, 0xd6, 0x23, 0xed, 0x34, 0x87, 0x2c, 0xad, 0x84, - 0xba, 0xaa, 0x0f, 0x52, 0x9d, 0x11, 0x3a, 0xc6, 0x45, 0x03, 0x09, 0xe0, 0xb5, 0x5f, 0x45, 0x73, - 0x92, 0x79, 0x6c, 0xbc, 0xf8, 0x9e, 0x15, 0x37, 0xd0, 0x28, 0xb3, 0x24, 0xd8, 0x91, 0x66, 0xdd, - 0x60, 0xc5, 0xba, 0x1c, 0x4d, 0x28, 0xfb, 0x82, 0x12, 0x85, 0x63, 0x6b, 0xff, 0x91, 0x47, 0xa7, - 0xae, 0x5a, 0xb6, 0xf1, 0xa1, 0xd5, 0x71, 0x75, 0x13, 0xf2, 0xc7, 0xa2, 0xe7, 0x5a, 0x4e, 0x43, - 0x37, 0xa9, 0x7d, 0x88, 0x45, 0xab, 0xa9, 0x14, 0xad, 0xaf, 0x64, 0xed, 0x59, 0xb2, 0xbd, 0xa9, - 0x15, 0xac, 0x1b, 0xab, 0x60, 0xd7, 0x0e, 0x48, 0xdf, 0xe0, 0x72, 0xf6, 0x07, 0x39, 0xf4, 0x50, - 0x8a, 0xe4, 0x51, 0xd4, 0xb6, 0x3b, 0x6a, 0x6d, 0xbb, 0x7a, 0x30, 0x7d, 0x4e, 0x29, 0x74, 0x7f, - 0x98, 0x4f, 0xed, 0x2b, 0x2f, 0xad, 0x3e, 0x80, 0x5a, 0x83, 0x7d, 0x11, 0xba, 0x25, 0xfa, 0xba, - 0x94, 0xd1, 0x9e, 0xba, 0xb7, 0x19, 0x1c, 0xf5, 0x00, 0x08, 0x85, 0x6d, 0x61, 0x83, 0x4a, 0xd5, - 0x86, 0x00, 0x27, 0xa1, 0x1a, 0x7c, 0x0e, 0x4d, 0xf0, 0xea, 0x41, 0xc9, 0x73, 0x33, 0xec, 0x5c, - 0x68, 0x3d, 0x6a, 0x26, 0x32, 0x0f, 0x7e, 0x16, 0x44, 0xf4, 0xbd, 0x58, 0x96, 0x0b, 0x8f, 0x93, - 0xd6, 0x23, 0x12, 0x91, 0xf9, 0x20, 0xe4, 0x4f, 0x37, 0xba, 0x9e, 0x74, 0xd2, 0x28, 0xca, 0xa9, - 0xac, 0x5d, 0x4c, 0x3a, 0xb4, 0xac, 0x62, 0x50, 0x3d, 0x0d, 0x14, 0xa9, 0x8d, 0xc4, 0xd4, 0x69, - 0x7f, 0x5b, 0x40, 0x8f, 0x0c, 0xf4, 0x51, 0xbc, 0x3a, 0xa0, 0x7a, 0x38, 0x99, 0xa1, 0x72, 0x68, - 0xa2, 0x29, 0x53, 0x77, 0x5c, 0x3e, 0xdc, 0x1b, 0x46, 0x3b, 0xc8, 0x6e, 0xbf, 0x70, 0x9f, 0x8e, - 0xcb, 0x44, 0xfc, 0x14, 0xb6, 0x26, 0xa3, 0x10, 0x15, 0x94, 0x55, 0x1c, 0x62, 0xe7, 0x9f, 0x56, - 0x71, 0x2c, 0xa9, 0x64, 0x12, 0xe7, 0x67, 0x10, 0x62, 0x63, 0x1e, 0xab, 0x37, 0x42, 0x88, 0x65, - 0x95, 0x4c, 0xe2, 0xfc, 0x50, 0xf5, 0x9d, 0x11, 0xa8, 0xea, 0xf0, 0x4b, 0xa7, 0xc7, 0x7e, 0xdd, - 0xf1, 0x38, 0xc0, 0x9d, 0x59, 0x1a, 0xcc, 0x4a, 0xee, 0x85, 0xa5, 0xad, 0xa3, 0xa9, 0xab, 0x96, - 0xe3, 0xd6, 0x58, 0x4a, 0x66, 0x79, 0x0b, 0x3f, 0x82, 0x0a, 0xe0, 0x9c, 0x62, 0x27, 0x32, 0x21, - 0xcc, 0x2e, 0x30, 0xe7, 0x65, 0xed, 0x9c, 0xac, 0xef, 0x09, 0xbf, 0x8e, 0xc8, 0xe0, 0x97, 0xac, - 0x5d, 0xbb, 0x82, 
0xc6, 0x45, 0x5e, 0x94, 0x81, 0x0a, 0x83, 0x81, 0x0a, 0x09, 0x40, 0x7f, 0x92, - 0x07, 0x24, 0x3f, 0x8d, 0x1c, 0x62, 0x42, 0x78, 0x5b, 0x49, 0x08, 0x17, 0x87, 0x4b, 0xb5, 0xa9, - 0x09, 0xa0, 0x19, 0x4b, 0x00, 0x2f, 0x0c, 0x89, 0x3f, 0x38, 0xe0, 0x7f, 0x91, 0x43, 0xd3, 0x6a, - 0xd2, 0x67, 0x11, 0x85, 0xad, 0x21, 0xa3, 0x41, 0xaf, 0x47, 0x1b, 0xfe, 0x30, 0xa2, 0xd4, 0x23, - 0x12, 0x91, 0xf9, 0x30, 0x0d, 0xc5, 0x98, 0x3b, 0x88, 0x41, 0xa9, 0xa4, 0x18, 0xcd, 0xae, 0x4e, - 0x2a, 0xfe, 0xd5, 0x09, 0x18, 0xea, 0xde, 0x80, 0x35, 0x6f, 0x43, 0x39, 0xd8, 0xa7, 0x86, 0x7b, - 0x96, 0x8c, 0xab, 0xfd, 0x43, 0x0e, 0x4d, 0x08, 0x83, 0x8f, 0x22, 0x23, 0xbd, 0xa5, 0x66, 0xa4, - 0xe7, 0x86, 0xac, 0xa7, 0x92, 0x33, 0xd0, 0x97, 0x51, 0x5f, 0x58, 0x05, 0xc5, 0x0a, 0xbc, 0x6d, - 0x58, 0x4e, 0xf1, 0x02, 0x8f, 0x2d, 0x31, 0xc2, 0x29, 0xf8, 0xb7, 0x72, 0x68, 0xd6, 0x88, 0xd5, - 0x5c, 0x62, 0xa8, 0x5f, 0x1a, 0xce, 0xb4, 0x10, 0x26, 0xba, 0x50, 0x8a, 0x53, 0x48, 0x9f, 0x4a, - 0xcd, 0x43, 0x7d, 0x5c, 0x58, 0x07, 0xeb, 0x5d, 0xb7, 0x3b, 0x64, 0xae, 0x4c, 0xaa, 0x26, 0xab, - 0x45, 0xde, 0x7d, 0xa0, 0x10, 0x0e, 0xad, 0x7d, 0x91, 0x0f, 0x07, 0xac, 0xee, 0xaf, 0x91, 0xb0, - 0xde, 0xcd, 0x1d, 0x44, 0xbd, 0x3b, 0x91, 0x54, 0xeb, 0x42, 0x04, 0x29, 0xb8, 0xe6, 0xb0, 0xe7, - 0x6d, 0x42, 0xc3, 0xc6, 0x5a, 0x3d, 0x8a, 0x53, 0xf0, 0x41, 0x18, 0x24, 0x7e, 0x0f, 0x8d, 0xb2, - 0xdd, 0x04, 0x5b, 0xe2, 0x85, 0xe1, 0x43, 0x08, 0x1b, 0xaf, 0xc8, 0xc3, 0xd8, 0x17, 0x78, 0x18, - 0xc7, 0x85, 0x32, 0x7d, 0x4a, 0x89, 0x03, 0xf8, 0x36, 0x9a, 0x34, 0x2d, 0xbd, 0x59, 0xd5, 0x4d, - 0x1d, 0x8a, 0x91, 0xe0, 0xec, 0xfe, 0x17, 0x07, 0x47, 0xc4, 0x35, 0x49, 0x42, 0xc4, 0x93, 0xf0, - 0x52, 0x4f, 0xa6, 0x11, 0x05, 0x5b, 0xd3, 0x11, 0x8a, 0x7a, 0x8f, 0xcf, 0xa0, 0x51, 0xe6, 0xc2, - 0xfe, 0xce, 0xa0, 0x54, 0x2d, 0x31, 0x5b, 0x99, 0x67, 0x83, 0xad, 0xbc, 0x1d, 0x9f, 0x47, 0xc8, - 0xa1, 0x0d, 0x9b, 0xba, 0x3c, 0xec, 0xf0, 0xc3, 0xaf, 0x28, 0x00, 0xd7, 0x43, 0x0a, 0x91, 0xb8, - 0xb4, 0xdf, 0xcb, 0xa3, 0xc2, 0x2b, 0xd6, 0xe6, 0x21, 0x06, 0xf9, 0x37, 0x94, 0x20, 0x9f, 0x75, - 0xfd, 0x83, 0x6d, 0xa9, 0x01, 0xfe, 0xfd, 0x58, 0x80, 0xbf, 0x30, 0x04, 0xf6, 0xe0, 0xe0, 0xfe, - 0x2f, 0x05, 0x34, 0x09, 0x5c, 0x4b, 0x56, 0xa7, 0x69, 0xf0, 0x52, 0xe8, 0x19, 0xe5, 0x90, 0xe0, - 0xd1, 0xd8, 0x21, 0xc1, 0xac, 0xcc, 0x2b, 0x1d, 0x0f, 0xdc, 0x0a, 0x0d, 0xf5, 0x27, 0xe5, 0xb2, - 0xaa, 0x0e, 0x24, 0x07, 0xde, 0xbe, 0x57, 0x42, 0x4c, 0xd5, 0x3c, 0xd8, 0xad, 0xf2, 0x1a, 0xaa, - 0x66, 0x5b, 0x9b, 0x7e, 0x61, 0x56, 0xc8, 0x5e, 0x98, 0x9d, 0x10, 0xb6, 0xf0, 0xe2, 0x2c, 0x44, - 0x22, 0x2a, 0x30, 0xbe, 0x83, 0x30, 0x6b, 0xd8, 0x80, 0xcd, 0xb5, 0xe3, 0xf7, 0x8e, 0xa9, 0x1b, - 0xc9, 0xae, 0x2e, 0x3c, 0xb5, 0x5a, 0xeb, 0x83, 0x23, 0x09, 0x2a, 0xd8, 0x39, 0x86, 0x4d, 0x75, - 0x07, 0xea, 0xd6, 0x51, 0x3e, 0x74, 0xd1, 0x76, 0x9f, 0xb7, 0x12, 0x41, 0xc5, 0x4f, 0xa1, 0xf1, - 0x36, 0xac, 0x13, 0x56, 0x9f, 0x8d, 0x71, 0xc6, 0x70, 0xe7, 0xbd, 0xee, 0x37, 0x93, 0x80, 0xae, - 0xfd, 0x4d, 0x0e, 0x8d, 0xc3, 0x44, 0x1d, 0x45, 0xf2, 0x7b, 0x5d, 0x4d, 0x7e, 0xe7, 0xb3, 0x3b, - 0x68, 0x4a, 0xe2, 0xfb, 0xab, 0x02, 0xef, 0x03, 0x8f, 0xe1, 0xb0, 0xe7, 0xe9, 0xea, 0xb6, 0x6e, - 0x9a, 0xd4, 0x34, 0x9c, 0xb6, 0x28, 0x1d, 0xf9, 0x9e, 0xa7, 0x16, 0x35, 0x13, 0x99, 0x87, 0x89, - 0x34, 0xac, 0x76, 0xd7, 0xa4, 0xc1, 0x0d, 0x43, 0x28, 0xb2, 0x14, 0x35, 0x13, 0x99, 0x07, 0xdf, - 0x40, 0x27, 0xf4, 0x86, 0x6b, 0xec, 0xd2, 0x65, 0xaa, 0x37, 0x4d, 0xa3, 0x43, 0x83, 0xd3, 0xdc, - 0x02, 0x2f, 0x21, 0x1f, 0x04, 0xe1, 0x13, 0x8b, 0x49, 0x0c, 0x24, 0x59, 0x4e, 0x39, 0x4e, 0x1f, - 0x39, 0xc4, 0xe3, 0xf4, 0x67, 0xd0, 0xa4, 
0x0e, 0x3b, 0xa3, 0x80, 0xc2, 0xfd, 0xa8, 0x58, 0x9d, - 0x65, 0xa1, 0x77, 0x51, 0x6a, 0x27, 0x0a, 0x97, 0x72, 0x08, 0x3f, 0x76, 0xd0, 0xd7, 0xa6, 0x7f, - 0x5d, 0x40, 0xa5, 0x30, 0xf8, 0x60, 0x0b, 0xa1, 0x46, 0xb0, 0xc0, 0x83, 0x63, 0x9f, 0x4b, 0xd9, - 0x3d, 0x25, 0x0c, 0x12, 0x51, 0x3c, 0x0e, 0x9b, 0x1c, 0x22, 0xa9, 0x80, 0x88, 0x5c, 0x82, 0x00, - 0x62, 0xbb, 0xc3, 0xee, 0xe5, 0xa6, 0x00, 0xbb, 0x54, 0x0f, 0x10, 0x48, 0x04, 0x86, 0x5b, 0xb0, - 0x29, 0x0e, 0x7d, 0x66, 0xd8, 0x88, 0xe4, 0x6f, 0x7e, 0x15, 0x18, 0x12, 0x83, 0x65, 0x61, 0xc1, - 0xf7, 0x2a, 0xb1, 0xc1, 0x0b, 0xc3, 0x82, 0xef, 0x82, 0x44, 0x50, 0xf1, 0x02, 0x74, 0xd5, 0x6b, - 0x34, 0x28, 0x6d, 0xd2, 0xa6, 0xd8, 0xb8, 0x1d, 0x13, 0xac, 0xa5, 0x7a, 0x40, 0x20, 0x11, 0x0f, - 0x03, 0xde, 0xd2, 0x0d, 0x76, 0x13, 0x3c, 0xa6, 0x02, 0xaf, 0xf2, 0x56, 0x22, 0xa8, 0xda, 0xff, - 0xe7, 0xd1, 0x94, 0xe2, 0x7f, 0xf8, 0x37, 0x73, 0xec, 0x20, 0xc1, 0x6d, 0x6c, 0xf3, 0xe6, 0x60, - 0x22, 0xd7, 0xf7, 0xe3, 0xd3, 0x95, 0xf5, 0x08, 0xcf, 0xbf, 0xaa, 0x93, 0xce, 0x25, 0x42, 0x0a, - 0x91, 0xd5, 0xe2, 0x4f, 0xa0, 0xc0, 0xe5, 0xdf, 0x2b, 0x7b, 0x5d, 0x56, 0x39, 0x48, 0x57, 0x88, - 0x57, 0xf6, 0x63, 0x0b, 0xa1, 0x1f, 0x78, 0xb0, 0x53, 0xe6, 0xe7, 0xd1, 0x61, 0xa1, 0xbb, 0x1e, - 0x53, 0x44, 0xfa, 0x54, 0xcf, 0x5f, 0x46, 0xb3, 0xf1, 0x5e, 0x64, 0xba, 0xb2, 0xfb, 0xa3, 0x1c, - 0x2a, 0xa7, 0x19, 0xc2, 0x76, 0xb1, 0x21, 0x50, 0x54, 0x1d, 0xbe, 0x4a, 0x7b, 0x3e, 0xea, 0x0a, - 0x2a, 0x5a, 0x5d, 0x76, 0x8a, 0x21, 0x6e, 0xec, 0x4a, 0xd5, 0xa7, 0x82, 0x55, 0x79, 0x43, 0xb4, - 0x43, 0xee, 0x3d, 0xa1, 0xc0, 0x07, 0x04, 0x12, 0x8a, 0x62, 0x0d, 0x8d, 0x71, 0x7b, 0xfc, 0x2a, - 0xb3, 0x54, 0x45, 0xcc, 0x1f, 0x78, 0x7d, 0x0d, 0xa9, 0xd8, 0xa7, 0x68, 0x7f, 0x0a, 0x85, 0x35, - 0x4b, 0x00, 0xc1, 0xb9, 0xf8, 0x25, 0x96, 0x9a, 0x25, 0x58, 0x61, 0xa3, 0x94, 0x6d, 0xe5, 0x2e, - 0xa9, 0xbc, 0x4c, 0x78, 0xcb, 0xa0, 0x66, 0xb3, 0x2e, 0x5f, 0x37, 0x4a, 0xc2, 0xab, 0x32, 0x91, - 0xa8, 0xbc, 0xec, 0x44, 0xff, 0x0e, 0x1b, 0x70, 0xbe, 0xf4, 0xa4, 0x13, 0xfd, 0xd7, 0x59, 0x23, - 0xf1, 0x69, 0xec, 0xa4, 0x24, 0x38, 0x58, 0x13, 0x4f, 0xec, 0xf8, 0x42, 0x2a, 0x45, 0x27, 0x25, - 0x44, 0x25, 0x93, 0x38, 0x3f, 0xbe, 0x88, 0xa6, 0xd9, 0x5b, 0x3f, 0xcb, 0x73, 0xe5, 0x7b, 0xbd, - 0x82, 0xbf, 0x7c, 0x37, 0x14, 0x0a, 0x89, 0x71, 0xf2, 0xe7, 0x3d, 0xd7, 0xa9, 0x7b, 0xc7, 0xb2, - 0x77, 0x6a, 0x96, 0x69, 0x34, 0x7a, 0x87, 0x58, 0x7f, 0x6e, 0x2a, 0xf5, 0xe7, 0xcb, 0x19, 0xd7, - 0x80, 0x62, 0x65, 0x5a, 0x25, 0xaa, 0xfd, 0x1f, 0x38, 0xa9, 0xc2, 0x29, 0x6f, 0x4a, 0x29, 0x1a, - 0x65, 0x57, 0x2d, 0x41, 0x44, 0xd8, 0x97, 0x05, 0x6c, 0x07, 0x2f, 0x9d, 0xe9, 0x33, 0x58, 0xe2, - 0xa3, 0xb3, 0x7e, 0x6e, 0xd9, 0x56, 0x5b, 0xac, 0xf5, 0xfd, 0x69, 0xa1, 0xd4, 0x8e, 0xfa, 0xb9, - 0x0a, 0xa8, 0x84, 0x63, 0x6b, 0xff, 0x9e, 0x43, 0xc7, 0x14, 0xce, 0xa3, 0x28, 0xa2, 0x74, 0xb5, - 0x88, 0x7a, 0x61, 0x3f, 0x3d, 0x4b, 0x29, 0xa7, 0x7e, 0x3b, 0x1f, 0xeb, 0x17, 0x1b, 0x01, 0x48, - 0xcc, 0x13, 0x5d, 0xab, 0x59, 0x3f, 0xc8, 0x57, 0x5a, 0x7e, 0x59, 0x16, 0x81, 0x12, 0x59, 0x03, - 0xfe, 0x75, 0x18, 0x5e, 0xf6, 0x12, 0xc4, 0xe9, 0xea, 0x0d, 0x5a, 0x3f, 0xc8, 0xb7, 0x06, 0x27, - 0xd8, 0x65, 0xeb, 0xf5, 0x38, 0x34, 0xe9, 0xd7, 0xa6, 0xfd, 0x65, 0x7c, 0x8a, 0x99, 0x93, 0xe1, - 0xd7, 0x50, 0x91, 0x3f, 0xcb, 0x6d, 0x58, 0xa6, 0x88, 0x64, 0xcf, 0xb2, 0xd9, 0xaa, 0x89, 0x36, - 0x88, 0xa2, 0x3f, 0x37, 0x70, 0x07, 0x13, 0x30, 0x92, 0x10, 0x06, 0xaf, 0xa1, 0x91, 0xee, 0xf0, - 0xe7, 0x5c, 0xfc, 0x60, 0x83, 0x1f, 0x6e, 0x71, 0x14, 0xed, 0x47, 0x71, 0xb3, 0x79, 0x69, 0xec, - 0x1c, 0xfc, 0x0c, 0x86, 0x19, 0x38, 0x75, 0x16, 0x6d, 0x34, 0x2e, 
0x8e, 0x7b, 0x86, 0xcc, 0xbb, - 0x69, 0x91, 0x24, 0xda, 0xd3, 0x04, 0x8d, 0x81, 0x22, 0xbe, 0x30, 0xb9, 0x41, 0x0d, 0xcf, 0x36, - 0xdc, 0xde, 0xa1, 0x07, 0xd5, 0x2d, 0x25, 0xa8, 0x2e, 0x67, 0xec, 0x60, 0x9f, 0xa5, 0xa9, 0x81, - 0xf5, 0xbf, 0x72, 0xe8, 0x44, 0x1f, 0xf7, 0x51, 0x04, 0x1d, 0xaa, 0x06, 0x9d, 0x97, 0xf7, 0xdb, - 0xc3, 0x94, 0xc0, 0x73, 0x17, 0x25, 0xf4, 0x8f, 0xbb, 0xee, 0x79, 0x84, 0xba, 0xb6, 0xb1, 0x0b, - 0xc5, 0x66, 0x4b, 0x3c, 0x9d, 0x2c, 0x46, 0x73, 0x52, 0x0b, 0x29, 0x44, 0xe2, 0xc2, 0xbf, 0xc6, - 0x9e, 0x35, 0x6e, 0xe9, 0x9e, 0xe9, 0x2e, 0x36, 0x9b, 0x4b, 0x7a, 0x57, 0xdf, 0x34, 0x4c, 0xa8, - 0xf9, 0xc5, 0x1d, 0x7b, 0xa9, 0xba, 0xe2, 0x3f, 0x69, 0x4c, 0xe2, 0x80, 0x15, 0xfc, 0xf3, 0x83, - 0xcf, 0x20, 0x02, 0xe6, 0x1e, 0x49, 0x51, 0x82, 0x7f, 0x03, 0xb2, 0xa0, 0xed, 0x57, 0x67, 0xcd, - 0x65, 0xdb, 0xea, 0x2a, 0x16, 0xf8, 0xa5, 0xd3, 0x15, 0xb0, 0xa0, 0x4c, 0x52, 0x78, 0xb2, 0xd8, - 0x90, 0xaa, 0x08, 0xbb, 0xe8, 0x38, 0xec, 0x73, 0xad, 0x3b, 0x54, 0x1d, 0x81, 0x11, 0xae, 0xbf, - 0xca, 0x5e, 0x8a, 0x2c, 0xf6, 0x93, 0xb3, 0xa8, 0x4e, 0x82, 0x87, 0x8d, 0xc6, 0xf8, 0xae, 0x65, - 0x7a, 0x10, 0x4d, 0xa1, 0x0c, 0x62, 0x9a, 0x58, 0xc4, 0x1d, 0xbf, 0xe5, 0x37, 0x7d, 0xcf, 0xf6, - 0x0f, 0x75, 0x7e, 0x20, 0x14, 0x70, 0xb1, 0x4b, 0x02, 0x76, 0x6a, 0x27, 0xd6, 0x3a, 0xdf, 0x6d, - 0x14, 0xa3, 0xe0, 0x72, 0x35, 0x22, 0x11, 0x99, 0x0f, 0xb7, 0x51, 0x69, 0x5b, 0x5c, 0x18, 0x39, - 0xe5, 0xf1, 0xa1, 0x12, 0xa2, 0x72, 0xe1, 0x14, 0x6d, 0x87, 0x82, 0x66, 0x87, 0x44, 0x1a, 0xd8, - 0xb1, 0x0a, 0xff, 0xb8, 0xb6, 0xcc, 0x5f, 0x40, 0x15, 0xa3, 0x10, 0x74, 0xd5, 0x6f, 0x26, 0x01, - 0x3d, 0x60, 0xbd, 0x56, 0x5b, 0x2a, 0x97, 0xfa, 0x59, 0xa1, 0x99, 0x04, 0x74, 0xdc, 0x45, 0xe3, - 0x0e, 0x5d, 0x33, 0x3a, 0xde, 0x5e, 0x19, 0xf1, 0xa5, 0xbb, 0x92, 0xf5, 0x5e, 0x78, 0x85, 0x4b, - 0xc7, 0x1e, 0xa3, 0x44, 0x1a, 0x05, 0x9d, 0x04, 0x6a, 0xf0, 0x1e, 0x2a, 0xd9, 0x5e, 0x67, 0xd1, - 0xb9, 0xe9, 0x50, 0xbb, 0x3c, 0xc1, 0x75, 0x66, 0x8d, 0xca, 0x24, 0x90, 0x8f, 0x6b, 0x0d, 0x47, - 0x30, 0xe4, 0x20, 0x91, 0x32, 0xfc, 0xfb, 0x39, 0x84, 0x1d, 0xaf, 0x0b, 0x9b, 0x57, 0xb6, 0x63, - 0xd1, 0x4d, 0xfe, 0x1e, 0xc6, 0x29, 0x4f, 0x72, 0x1b, 0x6a, 0x99, 0xef, 0xc3, 0xe3, 0x40, 0x71, - 0x63, 0xc2, 0xf3, 0xb5, 0x7e, 0x56, 0x92, 0x60, 0x07, 0x9b, 0x8a, 0x2d, 0x87, 0xff, 0xbb, 0x3c, - 0x35, 0xd4, 0x54, 0x24, 0xbf, 0x0b, 0x8a, 0xa6, 0x42, 0xd0, 0x49, 0xa0, 0x86, 0xbd, 0xbd, 0xb6, - 0xa9, 0xde, 0xbc, 0xd1, 0x31, 0x7b, 0xc4, 0xb2, 0xdc, 0x55, 0x88, 0x5d, 0x4e, 0xcf, 0x81, 0x68, - 0x58, 0x9e, 0xe6, 0x6e, 0x13, 0xbe, 0xbd, 0x26, 0x89, 0x5c, 0x24, 0x45, 0x9a, 0xbf, 0xbd, 0x16, - 0xd7, 0xb8, 0x87, 0xfb, 0x1b, 0x8b, 0xfd, 0xbd, 0xbd, 0x8e, 0x4c, 0x3c, 0xb4, 0xb7, 0xd7, 0x92, - 0x8a, 0x7b, 0xbf, 0xbd, 0x8e, 0x98, 0x7f, 0x0a, 0xde, 0x5e, 0x47, 0xc6, 0xa6, 0xe4, 0xd3, 0x1f, - 0x2b, 0x3d, 0xfa, 0x19, 0x7c, 0xe0, 0xcb, 0x2e, 0x74, 0x66, 0xe3, 0x0e, 0xa0, 0xbc, 0xfd, 0xcc, - 0xdd, 0xf3, 0xed, 0x67, 0x0d, 0xcd, 0x6d, 0x79, 0xa6, 0xd9, 0xe3, 0xbd, 0x91, 0xde, 0x52, 0xf8, - 0xc7, 0xc4, 0x0f, 0x0b, 0xc9, 0xb9, 0xd5, 0x04, 0x1e, 0x92, 0x28, 0x99, 0xf2, 0x8e, 0xb5, 0x30, - 0xd4, 0x3b, 0xd6, 0x4b, 0x68, 0x8a, 0x45, 0x80, 0x5e, 0xec, 0x89, 0x47, 0x78, 0x38, 0x42, 0x64, - 0x22, 0x51, 0x79, 0xb5, 0x87, 0xd1, 0xbc, 0xf8, 0x37, 0xc3, 0x5a, 0xb2, 0x3a, 0x2e, 0x7b, 0x47, - 0x49, 0xed, 0x65, 0xaf, 0xdd, 0xee, 0x69, 0x97, 0xc1, 0x79, 0x94, 0x17, 0xbb, 0xfe, 0xc0, 0xf9, - 0x8f, 0x88, 0xc5, 0x53, 0x0a, 0x69, 0xe0, 0xfc, 0x76, 0x12, 0x72, 0x68, 0xdf, 0xe4, 0xd0, 0xa9, - 0x94, 0x37, 0x9c, 0xf8, 0x36, 0x9a, 0x6e, 0xeb, 0x7b, 0xd2, 0x23, 0x55, 0xb1, 0xbc, 0xb2, 
0x6e, - 0x7c, 0xf8, 0xf1, 0xca, 0xba, 0x82, 0x44, 0x62, 0xc8, 0x3c, 0xf6, 0xe9, 0x7b, 0x75, 0xcf, 0x6e, - 0xd1, 0x21, 0xb7, 0x57, 0xdc, 0x75, 0xd7, 0x05, 0x06, 0x09, 0xd1, 0xd8, 0x4b, 0xd0, 0x72, 0x5a, - 0x22, 0x84, 0x92, 0x46, 0x7e, 0x0b, 0xfa, 0x58, 0xec, 0x2d, 0xe8, 0xb1, 0x3e, 0xb9, 0x23, 0x7a, - 0x09, 0xfa, 0x65, 0x0e, 0x9d, 0x4c, 0x2e, 0x18, 0xf0, 0x2f, 0x29, 0x16, 0x9f, 0x89, 0x59, 0x3c, - 0x13, 0x93, 0x12, 0xf6, 0x6e, 0xa3, 0x69, 0x51, 0x56, 0x08, 0x98, 0xfb, 0xf8, 0xc1, 0xe2, 0x6e, - 0x58, 0xb3, 0x04, 0x09, 0x92, 0xcf, 0xa3, 0xda, 0x46, 0x62, 0xb8, 0xda, 0x1f, 0xe6, 0xd1, 0x28, - 0x7f, 0x20, 0x75, 0x88, 0xd9, 0xec, 0x4d, 0x25, 0x9b, 0x65, 0xbd, 0x42, 0xe5, 0xd6, 0xa5, 0x26, - 0xb2, 0xcd, 0x58, 0x22, 0xbb, 0x38, 0x14, 0xfa, 0xe0, 0x1c, 0xf6, 0x3c, 0x2a, 0x85, 0x46, 0x64, - 0x8b, 0x73, 0xac, 0x62, 0x98, 0x90, 0x54, 0x64, 0x8c, 0x92, 0xbb, 0x4a, 0xb6, 0x18, 0xe6, 0x97, - 0xb5, 0x92, 0xee, 0x4a, 0x90, 0x26, 0xfc, 0x63, 0xfe, 0xe8, 0x89, 0x63, 0x7f, 0xf6, 0x80, 0x20, - 0xe5, 0xff, 0x3c, 0x39, 0x3c, 0xd6, 0x28, 0x70, 0xef, 0x3d, 0x29, 0x64, 0xa6, 0x37, 0x14, 0x2a, - 0x89, 0x71, 0xcf, 0x43, 0xfc, 0x54, 0x94, 0x65, 0x3a, 0x8d, 0xff, 0x26, 0x8f, 0x26, 0xeb, 0x20, - 0xa9, 0xb7, 0xe8, 0x92, 0xa9, 0x1f, 0xea, 0xe3, 0x30, 0xd8, 0x20, 0x75, 0x6d, 0xcb, 0x8f, 0xac, - 0x34, 0x38, 0x02, 0x8f, 0x4e, 0x5f, 0x22, 0x12, 0x91, 0xf9, 0xf0, 0x47, 0xb0, 0x6f, 0xd6, 0x6d, - 0x1d, 0x60, 0xa0, 0x32, 0x11, 0xcf, 0x42, 0x5e, 0xcd, 0x3a, 0x31, 0x52, 0x0f, 0x2b, 0xb5, 0x10, - 0xcd, 0x9f, 0x9b, 0x68, 0x13, 0x1e, 0x12, 0x88, 0xa4, 0x72, 0xfe, 0x45, 0x34, 0x13, 0x13, 0xc9, - 0x34, 0xc2, 0xff, 0x9a, 0x43, 0xb3, 0xb2, 0xfe, 0xa3, 0xa8, 0xca, 0xde, 0x57, 0xab, 0xb2, 0x4b, - 0xfb, 0x18, 0xae, 0x94, 0xba, 0xec, 0xef, 0x72, 0x68, 0x2e, 0xe9, 0x31, 0x2f, 0x7b, 0xb1, 0xb5, - 0x63, 0x88, 0xd7, 0x47, 0xd2, 0x8b, 0xad, 0x57, 0xa1, 0x8d, 0x70, 0x4a, 0xf8, 0xbb, 0xb9, 0x7c, - 0xea, 0xef, 0xe6, 0xce, 0x23, 0x04, 0xa6, 0x05, 0xf7, 0x18, 0x05, 0xf5, 0xdd, 0x4b, 0xf4, 0x3f, - 0x11, 0x20, 0x12, 0x17, 0x7f, 0xa3, 0x17, 0xd9, 0x23, 0x2e, 0x3f, 0xa2, 0xc7, 0x73, 0x92, 0xa9, - 0x32, 0x9f, 0xf6, 0xf7, 0x39, 0xf4, 0xd8, 0x3d, 0x37, 0x61, 0xb8, 0xaa, 0xa4, 0x95, 0x4a, 0x2c, - 0xad, 0x9c, 0x4e, 0x07, 0x38, 0xc2, 0xdf, 0x47, 0x7c, 0x9a, 0x47, 0x78, 0x63, 0xdb, 0xb0, 0x9b, - 0xe0, 0xac, 0x6e, 0x2f, 0xb8, 0xed, 0x39, 0xdc, 0xf5, 0xdc, 0xa4, 0x4e, 0xc3, 0x36, 0xf8, 0x20, - 0xc5, 0xd7, 0xf3, 0x72, 0x44, 0x22, 0x32, 0x1f, 0x6c, 0x86, 0x8a, 0xc2, 0x9b, 0x83, 0xd5, 0x9c, - 0x75, 0xd3, 0x10, 0x79, 0x40, 0xb4, 0x08, 0x44, 0x03, 0xc4, 0xf3, 0x00, 0x5c, 0xfb, 0x0c, 0xca, - 0x84, 0xfe, 0x01, 0x59, 0xf6, 0x9f, 0x30, 0x1d, 0xd6, 0xa0, 0x3c, 0x8c, 0x46, 0x38, 0x2a, 0x1b, - 0x8d, 0x49, 0xff, 0x50, 0x9b, 0x69, 0x24, 0xbc, 0x55, 0xfb, 0x2e, 0x87, 0xe6, 0x93, 0x4d, 0x3a, - 0x8a, 0xa8, 0x70, 0x5b, 0x8d, 0x0a, 0x59, 0x0f, 0x06, 0x92, 0x0d, 0x4f, 0x89, 0x0f, 0xdf, 0x24, - 0x0e, 0xfe, 0x51, 0xf4, 0x72, 0x4b, 0xed, 0xe5, 0xe2, 0xbe, 0x7b, 0x99, 0xdc, 0xc3, 0xea, 0x53, - 0x77, 0xff, 0xe7, 0xf4, 0x03, 0x5f, 0xc1, 0xdf, 0x7f, 0xc2, 0xdf, 0xc7, 0xdf, 0x9e, 0xce, 0xdd, - 0x85, 0xbf, 0xaf, 0xe0, 0xef, 0xbf, 0xe1, 0xef, 0xb3, 0xff, 0x3d, 0xfd, 0xc0, 0x9b, 0xe3, 0x02, - 0xf3, 0x27, 0x01, 0x00, 0x00, 0xff, 0xff, 0x55, 0x2c, 0x00, 0x0f, 0x93, 0x46, 0x00, 0x00, + // 3921 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe4, 0x1b, 0x4d, 0x8f, 0x1c, 0x47, + 0x35, 0x33, 0xb3, 0x1f, 0x33, 0xb5, 0x1f, 0x5e, 0x97, 0xd7, 0xf6, 0x64, 0x93, 0xd8, 0x49, 0x47, + 0x84, 0x44, 0xc4, 0xb3, 0xd8, 0x24, 0xc1, 0x71, 0x12, 0x27, 0x3b, 0xfb, 
0x61, 0x3b, 0xd9, 0xb5, + 0x27, 0x35, 0x6b, 0x27, 0xe4, 0x93, 0xde, 0x99, 0xda, 0xd9, 0xf6, 0xf6, 0x4c, 0x4f, 0xfa, 0x63, + 0xbd, 0x13, 0x84, 0x08, 0x02, 0x24, 0x2e, 0x09, 0xb9, 0x11, 0x09, 0x38, 0x20, 0x81, 0x38, 0x20, + 0x22, 0x90, 0x90, 0x72, 0xe0, 0x02, 0x48, 0x08, 0x73, 0x40, 0x04, 0x04, 0x82, 0x0b, 0x09, 0x04, + 0x41, 0xc4, 0x5f, 0x08, 0x48, 0xf0, 0xaa, 0xba, 0xba, 0xbb, 0xaa, 0xa7, 0x7b, 0xec, 0x99, 0xfd, + 0x90, 0x10, 0x87, 0x95, 0xdd, 0xf5, 0x3e, 0xeb, 0xd5, 0xab, 0xf7, 0x5e, 0x55, 0xbd, 0x41, 0x8f, + 0x6d, 0x9e, 0x76, 0x4a, 0x86, 0x35, 0xbb, 0xe9, 0xad, 0x51, 0xbb, 0x45, 0x5d, 0xea, 0xcc, 0xb6, + 0x37, 0x1b, 0xb3, 0x7a, 0xdb, 0x70, 0x66, 0xe9, 0xb6, 0x4b, 0x5b, 0x8e, 0x61, 0xb5, 0x9c, 0xd9, + 0xad, 0x93, 0x6b, 0xd4, 0xd5, 0x4f, 0xce, 0x36, 0x68, 0x8b, 0xda, 0xba, 0x4b, 0xeb, 0xa5, 0xb6, + 0x6d, 0xb9, 0x16, 0x3e, 0xe1, 0x93, 0x97, 0x22, 0xf2, 0x12, 0x90, 0x97, 0x18, 0x79, 0x29, 0x22, + 0x2f, 0x09, 0xf2, 0x99, 0x13, 0x0d, 0xc3, 0xdd, 0xf0, 0xd6, 0x4a, 0x35, 0xab, 0x39, 0xdb, 0xb0, + 0x1a, 0xd6, 0x2c, 0xe7, 0xb2, 0xe6, 0xad, 0xf3, 0x2f, 0xfe, 0xc1, 0xff, 0xe7, 0x73, 0x9f, 0x39, + 0x95, 0xaa, 0xdc, 0xac, 0x4d, 0x1d, 0xcb, 0xb3, 0x6b, 0x34, 0xae, 0xd1, 0xcc, 0x83, 0xe9, 0x34, + 0x5e, 0x6b, 0x8b, 0xda, 0x4c, 0x21, 0x5a, 0xef, 0x22, 0xbb, 0x3f, 0x9d, 0x6c, 0xab, 0x6b, 0xda, + 0x33, 0x27, 0x92, 0xb1, 0x6d, 0xaf, 0xe5, 0x1a, 0xcd, 0x6e, 0x9d, 0x4e, 0x26, 0xa3, 0x7b, 0xae, + 0x61, 0xce, 0x1a, 0x2d, 0xd7, 0x71, 0xed, 0x38, 0x89, 0x56, 0x42, 0x68, 0xae, 0x72, 0xe1, 0x8a, + 0xaf, 0x2f, 0xbe, 0x13, 0x0d, 0xb5, 0xf4, 0x26, 0x2d, 0x66, 0xee, 0xcc, 0xdc, 0x5b, 0x28, 0x8f, + 0x5f, 0x7f, 0xef, 0xf8, 0x2d, 0x1f, 0xbc, 0x77, 0x7c, 0xe8, 0x22, 0x8c, 0x11, 0x0e, 0xd1, 0x5e, + 0x40, 0xd3, 0xf3, 0x95, 0xcb, 0xab, 0xba, 0xdd, 0xa0, 0xee, 0x65, 0xe0, 0x6b, 0xbc, 0xaa, 0xbb, + 0x8c, 0x72, 0x01, 0x4d, 0xb9, 0x7c, 0xb0, 0x42, 0xc1, 0x5a, 0x2d, 0x57, 0x6f, 0xf8, 0x5c, 0x86, + 0xcb, 0x45, 0xc1, 0x65, 0x6a, 0x35, 0x06, 0x27, 0x5d, 0x14, 0xda, 0xd7, 0x33, 0xe8, 0xd6, 0x79, + 0xcf, 0x71, 0xad, 0xe6, 0x0a, 0x75, 0x6d, 0xa3, 0x36, 0xef, 0xd9, 0x36, 0x80, 0xaa, 0xae, 0xee, + 0x7a, 0xce, 0x8d, 0xb5, 0xc3, 0xcf, 0xa2, 0xe1, 0x2d, 0xdd, 0xf4, 0x68, 0x31, 0x0b, 0x28, 0x63, + 0xa7, 0xee, 0x2f, 0xa5, 0xba, 0x4d, 0x29, 0x58, 0xd8, 0xd2, 0xd3, 0x9e, 0x0e, 0xd6, 0x74, 0x3b, + 0xe5, 0x69, 0xc1, 0x70, 0x5c, 0x48, 0xbd, 0xc2, 0x38, 0x11, 0x9f, 0xa1, 0xf6, 0x46, 0x06, 0xdd, + 0x91, 0xaa, 0xd9, 0xb2, 0xe1, 0xb8, 0xb8, 0x89, 0x86, 0x0d, 0x97, 0x36, 0x1d, 0x50, 0x2f, 0x07, + 0xb2, 0xcf, 0x97, 0xfa, 0x72, 0xd9, 0x52, 0x2a, 0xf3, 0xf2, 0x84, 0xd0, 0x6b, 0xf8, 0x02, 0x63, + 0x4f, 0x7c, 0x29, 0xda, 0xd7, 0x32, 0x08, 0xcb, 0x34, 0xbe, 0x75, 0x6f, 0xc2, 0x46, 0xcf, 0xec, + 0xc4, 0x46, 0x87, 0x04, 0xc3, 0x31, 0x5f, 0x9c, 0x62, 0xa2, 0xd7, 0x32, 0xe8, 0x48, 0xb7, 0x46, + 0xdc, 0x36, 0xeb, 0xaa, 0x6d, 0xe6, 0x76, 0x60, 0x1b, 0x9f, 0x6b, 0x8a, 0x51, 0x7e, 0x90, 0x45, + 0x85, 0x05, 0x9d, 0x36, 0xad, 0x56, 0x15, 0x6c, 0xf1, 0x2c, 0xca, 0x37, 0x81, 0xbe, 0xae, 0xbb, + 0x3a, 0xb7, 0xc7, 0xd8, 0xa9, 0x7b, 0x7b, 0x4c, 0x76, 0xeb, 0x64, 0xe9, 0xd2, 0xda, 0x55, 0x5a, + 0x73, 0x41, 0x8e, 0x5e, 0xc6, 0x82, 0x3f, 0x8a, 0xc6, 0x48, 0xc8, 0x0d, 0xbf, 0x84, 0x86, 0x9c, + 0x36, 0xad, 0x09, 0x13, 0x3e, 0xda, 0xe7, 0x74, 0x42, 0x0d, 0xab, 0xc0, 0x23, 0x5a, 0x23, 0xf6, + 0x45, 0x38, 0x5f, 0xb0, 0xd7, 0x88, 0xc3, 0x17, 0xbf, 0x98, 0xe3, 0x12, 0xce, 0x0e, 0x2c, 0xc1, + 0x77, 0xa1, 0x49, 0x21, 0x63, 0xc4, 0xff, 0x26, 0x82, 0xbb, 0xf6, 0xeb, 0x0c, 0x9a, 0x08, 0x71, + 0xf9, 0x4a, 0xbd, 0xd8, 0x65, 0xb3, 0xd9, 0x1e, 0x36, 0x93, 0x22, 0x5d, 0x89, 0x91, 0x73, 0xd3, + 
0x4d, 0x09, 0x61, 0xf9, 0x60, 0x44, 0x32, 0xdc, 0x8b, 0x81, 0x23, 0x64, 0xb9, 0x23, 0x9c, 0x1e, + 0x74, 0x5e, 0x29, 0xeb, 0xff, 0x7b, 0x79, 0x3e, 0x55, 0xdf, 0x92, 0x79, 0x87, 0x9a, 0xb0, 0x82, + 0x96, 0x2d, 0xe6, 0xd3, 0xef, 0x6a, 0x2d, 0xeb, 0x6b, 0xd4, 0xac, 0x0a, 0x1e, 0xe5, 0x71, 0x36, + 0xb1, 0xe0, 0x8b, 0x84, 0xbc, 0xf1, 0xf3, 0x28, 0x0f, 0x1a, 0xb4, 0x4d, 0x08, 0xad, 0xc2, 0x2b, + 0x4e, 0xf4, 0xf6, 0xb5, 0x8a, 0x55, 0x5f, 0x15, 0x04, 0xdc, 0x0d, 0x42, 0xab, 0x05, 0xa3, 0x24, + 0x64, 0xa8, 0xbd, 0x9e, 0x45, 0x07, 0x62, 0x4b, 0x8a, 0xaf, 0xa0, 0x23, 0x35, 0x3f, 0x4c, 0x5c, + 0xf4, 0x9a, 0x20, 0xa0, 0x5a, 0xdb, 0xa0, 0x75, 0xcf, 0xa4, 0x75, 0x11, 0x76, 0x8f, 0x09, 0x7e, + 0x47, 0xe6, 0x13, 0xb1, 0x48, 0x0a, 0x35, 0x7e, 0x12, 0xe1, 0x16, 0x1f, 0x5a, 0x31, 0x1c, 0x27, + 0xe4, 0x99, 0xe5, 0x3c, 0x67, 0x04, 0x4f, 0x7c, 0xb1, 0x0b, 0x83, 0x24, 0x50, 0x31, 0x1d, 0xeb, + 0xd4, 0x31, 0x6c, 0x5a, 0x8f, 0xeb, 0x98, 0x53, 0x75, 0x5c, 0x48, 0xc4, 0x22, 0x29, 0xd4, 0xda, + 0x0f, 0xb3, 0x08, 0x2d, 0xd0, 0xb6, 0x69, 0x75, 0x9a, 0x30, 0x83, 0x3d, 0xdc, 0xe7, 0x2f, 0x2b, + 0xfb, 0xfc, 0xb1, 0x7e, 0xbd, 0x35, 0x54, 0x31, 0x75, 0xa3, 0x37, 0x62, 0x1b, 0xfd, 0xf1, 0xc1, + 0x45, 0xf4, 0xde, 0xe9, 0xbf, 0xc9, 0xa0, 0xc9, 0x08, 0x79, 0x3f, 0xb6, 0xfa, 0x4b, 0xea, 0x56, + 0x7f, 0x78, 0xe0, 0x99, 0xa5, 0xec, 0xf5, 0xb7, 0x72, 0x08, 0x47, 0x48, 0xc4, 0x32, 0xcd, 0x35, + 0xbd, 0xb6, 0x79, 0x13, 0x09, 0xf0, 0xbb, 0x90, 0x39, 0xbd, 0x76, 0x9d, 0x15, 0x41, 0x73, 0xad, + 0x96, 0xe5, 0xf2, 0x02, 0x26, 0x50, 0xf3, 0x33, 0x03, 0xab, 0x19, 0x68, 0x50, 0xba, 0xdc, 0xc5, + 0x7b, 0xb1, 0xe5, 0xda, 0x9d, 0x68, 0xf7, 0x74, 0x23, 0x90, 0x04, 0x85, 0xf0, 0x2b, 0x08, 0xd9, + 0x82, 0xe7, 0xaa, 0x25, 0xfc, 0xa3, 0x5f, 0x17, 0x0c, 0x94, 0x9a, 0xb7, 0x5a, 0xeb, 0x46, 0x23, + 0xf2, 0x76, 0x12, 0x32, 0x26, 0x92, 0x90, 0x99, 0x45, 0x74, 0x34, 0x45, 0x7b, 0x3c, 0x85, 0x72, + 0x9b, 0xb4, 0xe3, 0x9b, 0x95, 0xb0, 0xff, 0xe2, 0x69, 0xb9, 0x90, 0x28, 0x88, 0x2a, 0xe0, 0x4c, + 0xf6, 0x74, 0x46, 0x7b, 0x6b, 0x58, 0x76, 0x36, 0x1e, 0x87, 0xef, 0x45, 0x79, 0x1b, 0x46, 0x8c, + 0x9a, 0xee, 0x88, 0x00, 0xc5, 0x23, 0x29, 0x11, 0x63, 0x24, 0x84, 0x2a, 0x11, 0x3b, 0xbb, 0x4f, + 0x11, 0x3b, 0xb7, 0xcb, 0x11, 0x1b, 0x5b, 0x30, 0x09, 0x97, 0xd5, 0xd9, 0x8d, 0x4e, 0x71, 0x88, + 0x33, 0x9f, 0xdb, 0xc1, 0xce, 0xf6, 0x19, 0x45, 0x02, 0x83, 0x11, 0x12, 0x0a, 0xc1, 0x73, 0xe8, + 0x40, 0xd3, 0x68, 0x11, 0xaa, 0xd7, 0x3b, 0x55, 0x5a, 0xb3, 0x5a, 0x75, 0xa7, 0x38, 0xcc, 0xcd, + 0x7c, 0x54, 0x10, 0x1d, 0x58, 0x51, 0xc1, 0x24, 0x8e, 0x8f, 0x97, 0xd1, 0xb4, 0x4d, 0xb7, 0x0c, + 0xa6, 0xc6, 0x79, 0xd8, 0xce, 0x96, 0xdd, 0x59, 0x36, 0x9a, 0x86, 0x5b, 0x1c, 0xf1, 0xcb, 0x78, + 0xe0, 0x31, 0x4d, 0x12, 0xe0, 0x24, 0x91, 0x0a, 0xdf, 0x83, 0x46, 0xda, 0xba, 0xe7, 0x40, 0xac, + 0x1f, 0x05, 0xfa, 0x7c, 0x14, 0x98, 0x2a, 0x7c, 0x94, 0x08, 0x28, 0x94, 0xcd, 0xb2, 0x97, 0xe7, + 0x77, 0xc3, 0xcb, 0x27, 0xd3, 0x3d, 0x5c, 0xfb, 0x30, 0x8b, 0xa6, 0xe2, 0x41, 0x93, 0xe5, 0x3c, + 0x6b, 0xcd, 0xa1, 0xf6, 0x16, 0xad, 0x9f, 0xf3, 0xcf, 0x47, 0xc0, 0x9e, 0xbb, 0x69, 0x2e, 0xda, + 0xb5, 0x97, 0xba, 0x30, 0x48, 0x02, 0x15, 0xbe, 0x5f, 0x72, 0x74, 0x3f, 0x6b, 0x86, 0xcb, 0x96, + 0xe0, 0xec, 0xb0, 0x6c, 0x62, 0xe7, 0x07, 0x40, 0x91, 0x1a, 0xc3, 0x65, 0xbb, 0xac, 0x82, 0x49, + 0x1c, 0x1f, 0x9f, 0x43, 0x07, 0xf5, 0x2d, 0xdd, 0x30, 0xf5, 0x35, 0x93, 0x86, 0x4c, 0x86, 0x38, + 0x93, 0x5b, 0x05, 0x93, 0x83, 0x73, 0x71, 0x04, 0xd2, 0x4d, 0x83, 0x57, 0xd0, 0x21, 0xaf, 0xd5, + 0xcd, 0xca, 0x77, 0xa3, 0xdb, 0x04, 0xab, 0x43, 0x97, 0xbb, 0x51, 0x48, 0x12, 0x9d, 0xf6, 0xdb, + 0x8c, 0x1c, 0x9f, 0x03, 
0x97, 0xc5, 0x67, 0xd0, 0x90, 0xdb, 0x69, 0x07, 0xf1, 0xf9, 0x9e, 0x20, + 0x3e, 0xaf, 0xc2, 0xd8, 0x47, 0xbc, 0x12, 0x88, 0x53, 0x30, 0x08, 0xe1, 0x34, 0xf8, 0x0b, 0x68, + 0x82, 0x2d, 0xa5, 0xd1, 0x6a, 0xf8, 0x56, 0x11, 0xf1, 0x61, 0x69, 0x00, 0x77, 0x09, 0x79, 0x48, + 0x79, 0xe6, 0x20, 0x28, 0x32, 0xa1, 0x00, 0x89, 0x2a, 0x0f, 0x4e, 0xbf, 0x13, 0x8b, 0xdb, 0x6d, + 0xcb, 0x76, 0x2f, 0xb5, 0xfd, 0x18, 0x0d, 0x5e, 0x4e, 0xf9, 0x00, 0x9f, 0x8f, 0xe4, 0xe5, 0x3e, + 0x1a, 0x11, 0x50, 0x7c, 0x37, 0x1a, 0xa6, 0xdb, 0x7a, 0xcd, 0xe5, 0x1a, 0xe7, 0xa3, 0x8c, 0xb6, + 0xc8, 0x06, 0x89, 0x0f, 0xd3, 0x7e, 0x04, 0x07, 0xa8, 0xa5, 0xea, 0x39, 0xdb, 0xf2, 0xda, 0xc1, + 0xe4, 0x03, 0x39, 0x9f, 0x46, 0x43, 0x36, 0x94, 0x3e, 0xc2, 0x6a, 0x77, 0x07, 0x56, 0x23, 0x30, + 0x06, 0x56, 0x3b, 0x14, 0xa3, 0xf2, 0x4d, 0xc6, 0x08, 0x20, 0x0b, 0x8f, 0xd8, 0x7a, 0xab, 0x41, + 0x83, 0xfc, 0xf6, 0x50, 0x9f, 0xb6, 0xba, 0xb0, 0x40, 0x18, 0x79, 0x34, 0x31, 0xfe, 0x09, 0x75, + 0x85, 0xcf, 0x55, 0xfb, 0x56, 0x06, 0x1d, 0x38, 0xbf, 0xba, 0x5a, 0xb9, 0xd0, 0x6a, 0xc0, 0x69, + 0xd1, 0xa9, 0xe8, 0xee, 0x06, 0x4b, 0xc1, 0x6d, 0xf8, 0x37, 0x9e, 0x82, 0x19, 0x8c, 0x70, 0x08, + 0xde, 0x40, 0xa3, 0x6c, 0x3f, 0xd2, 0x56, 0x7d, 0xc0, 0xd2, 0x4a, 0x88, 0x2b, 0xfb, 0x4c, 0xca, + 0x07, 0x84, 0x8c, 0x51, 0x31, 0x40, 0x02, 0xf6, 0xda, 0xe7, 0xd0, 0xb4, 0xa4, 0x1e, 0xb3, 0x17, + 0x3f, 0xb3, 0xe2, 0x1a, 0x1a, 0x66, 0x9a, 0x04, 0x27, 0xd2, 0x7e, 0x0f, 0x58, 0xb1, 0x29, 0x47, + 0x0b, 0xca, 0xbe, 0xa0, 0x44, 0xe1, 0xbc, 0xb5, 0x3f, 0x66, 0xd1, 0xd1, 0xf3, 0x96, 0x6d, 0xbc, + 0x6a, 0xb5, 0x5c, 0xdd, 0x84, 0xfc, 0x31, 0xe7, 0xb9, 0x96, 0x53, 0xd3, 0x4d, 0x6a, 0xef, 0x61, + 0xd1, 0x6a, 0x2a, 0x45, 0xeb, 0x93, 0xfd, 0xce, 0x2c, 0x59, 0xdf, 0xd4, 0x0a, 0xd6, 0x8d, 0x55, + 0xb0, 0xcb, 0xbb, 0x24, 0xaf, 0x77, 0x39, 0xfb, 0xcf, 0x0c, 0xba, 0x2d, 0x85, 0x72, 0x3f, 0x6a, + 0xdb, 0x4d, 0xb5, 0xb6, 0x5d, 0xda, 0x9d, 0x39, 0xa7, 0x14, 0xba, 0xff, 0xca, 0xa6, 0xce, 0x95, + 0x97, 0x56, 0xaf, 0x40, 0xad, 0xc1, 0xbe, 0x08, 0x5d, 0x17, 0x73, 0x9d, 0xef, 0x53, 0x9f, 0xaa, + 0xb7, 0x16, 0x5c, 0xf5, 0x00, 0x13, 0x0a, 0xc7, 0xc2, 0x1a, 0x95, 0xaa, 0x0d, 0xc1, 0x9c, 0x84, + 0x62, 0xf0, 0x49, 0x34, 0xc6, 0xab, 0x07, 0x25, 0xcf, 0x1d, 0x60, 0xf7, 0x42, 0x2b, 0xd1, 0x30, + 0x91, 0x71, 0xf0, 0x83, 0x40, 0xa2, 0x6f, 0xc7, 0xb2, 0x5c, 0x78, 0x9d, 0xb4, 0x12, 0x81, 0x88, + 0x8c, 0x07, 0x21, 0x7f, 0xb2, 0xd6, 0xf6, 0xa4, 0x9b, 0x46, 0x51, 0x4e, 0xf5, 0x3b, 0xc5, 0xa4, + 0x4b, 0xcb, 0x32, 0x06, 0xd1, 0x93, 0x00, 0x91, 0xc6, 0x48, 0x4c, 0x9c, 0xf6, 0xf3, 0x1c, 0xba, + 0xa3, 0xa7, 0x8f, 0xe2, 0xa5, 0x1e, 0xd5, 0xc3, 0x91, 0x3e, 0x2a, 0x87, 0x3a, 0x9a, 0x30, 0x75, + 0xc7, 0xe5, 0xe6, 0x5e, 0x35, 0x9a, 0x41, 0x76, 0xfb, 0xc4, 0x4d, 0x3a, 0x2e, 0x23, 0xf1, 0x53, + 0xd8, 0xb2, 0xcc, 0x85, 0xa8, 0x4c, 0x59, 0xc5, 0x21, 0x4e, 0xfe, 0x69, 0x15, 0xc7, 0xbc, 0x0a, + 0x26, 0x71, 0x7c, 0xc6, 0x42, 0x1c, 0xcc, 0x63, 0xf5, 0x46, 0xc8, 0x62, 0x41, 0x05, 0x93, 0x38, + 0x3e, 0x54, 0x7d, 0xc7, 0x05, 0x57, 0xd5, 0xfc, 0xd2, 0xed, 0xb1, 0x5f, 0x77, 0xdc, 0x0d, 0xec, + 0x8e, 0xcf, 0xf7, 0x46, 0x25, 0x37, 0xe2, 0xa5, 0xad, 0xa0, 0x89, 0xf3, 0x96, 0xe3, 0x56, 0x58, + 0x4a, 0x66, 0x79, 0x0b, 0xdf, 0x81, 0x72, 0xe0, 0x9c, 0xe2, 0x24, 0x32, 0x26, 0xd4, 0xce, 0x31, + 0xe7, 0x65, 0xe3, 0x1c, 0xac, 0x6f, 0x0b, 0xbf, 0x8e, 0xc0, 0xe0, 0x97, 0x6c, 0x5c, 0x3b, 0x87, + 0x46, 0x45, 0x5e, 0x94, 0x19, 0xe5, 0x7a, 0x33, 0xca, 0x25, 0x30, 0xfa, 0x5e, 0x16, 0x38, 0xf9, + 0x69, 0x64, 0x0f, 0x13, 0xc2, 0x0b, 0x4a, 0x42, 0x38, 0x33, 0x58, 0xaa, 0x4d, 0x4d, 0x00, 0xf5, + 0x58, 0x02, 0x78, 0x74, 0x40, 0xfe, 0xbd, 0x03, 
0xfe, 0xdb, 0x19, 0x34, 0xa9, 0x26, 0x7d, 0x16, + 0x51, 0xd8, 0x1e, 0x32, 0x6a, 0xf4, 0x62, 0x74, 0xe0, 0x0f, 0x23, 0x4a, 0x35, 0x02, 0x11, 0x19, + 0x0f, 0xd3, 0x90, 0x8c, 0xb9, 0x83, 0x30, 0x4a, 0x29, 0x45, 0x69, 0xf6, 0x74, 0x52, 0xf2, 0x9f, + 0x4e, 0x40, 0x51, 0xf7, 0x12, 0xec, 0x79, 0x1b, 0xca, 0xc1, 0x2e, 0x31, 0xdc, 0xb3, 0x64, 0xbe, + 0xda, 0xaf, 0x32, 0x68, 0x4c, 0x28, 0xbc, 0x1f, 0x19, 0xe9, 0x79, 0x35, 0x23, 0x3d, 0x34, 0x60, + 0x3d, 0x95, 0x9c, 0x81, 0xde, 0x89, 0xe6, 0xc2, 0x2a, 0x28, 0x56, 0xe0, 0x6d, 0xc0, 0x76, 0x8a, + 0x17, 0x78, 0x6c, 0x8b, 0x11, 0x0e, 0xc1, 0x5f, 0xc9, 0xa0, 0x29, 0x23, 0x56, 0x73, 0x09, 0x53, + 0x3f, 0x3e, 0x98, 0x6a, 0x21, 0x9b, 0xe8, 0x41, 0x29, 0x0e, 0x21, 0x5d, 0x22, 0x35, 0x0f, 0x75, + 0x61, 0x61, 0x1d, 0xb4, 0x77, 0xdd, 0xf6, 0x80, 0xb9, 0x32, 0xa9, 0x9a, 0x2c, 0xe7, 0xf9, 0xf4, + 0x01, 0x42, 0x38, 0x6b, 0xed, 0xed, 0x6c, 0x68, 0xb0, 0xaa, 0xbf, 0x47, 0xc2, 0x7a, 0x37, 0xb3, + 0x1b, 0xf5, 0xee, 0x58, 0x52, 0xad, 0x0b, 0x11, 0x24, 0xe7, 0x9a, 0x83, 0xde, 0xb7, 0x09, 0x09, + 0xab, 0xcb, 0xd5, 0x28, 0x4e, 0xc1, 0x07, 0x61, 0x2c, 0xf1, 0xcb, 0x68, 0x98, 0x9d, 0x26, 0xd8, + 0x16, 0xcf, 0x0d, 0x1e, 0x42, 0x98, 0xbd, 0x22, 0x0f, 0x63, 0x5f, 0xe0, 0x61, 0x9c, 0x2f, 0x94, + 0xe9, 0x13, 0x4a, 0x1c, 0xc0, 0x57, 0xd1, 0xb8, 0x69, 0xe9, 0xf5, 0xb2, 0x6e, 0xea, 0x50, 0x8c, + 0x04, 0x77, 0xf7, 0x9f, 0xec, 0x1d, 0x11, 0x97, 0x25, 0x0a, 0x11, 0x4f, 0xc2, 0x47, 0x3d, 0x19, + 0x46, 0x14, 0xde, 0x9a, 0x8e, 0x50, 0x34, 0x7b, 0x7c, 0x1c, 0x0d, 0x33, 0x17, 0xf6, 0x4f, 0x06, + 0x85, 0x72, 0x81, 0xe9, 0xca, 0x3c, 0x1b, 0x74, 0xe5, 0xe3, 0xf8, 0x14, 0x42, 0x0e, 0xad, 0xd9, + 0xd4, 0xe5, 0x61, 0x87, 0x5f, 0x7e, 0x45, 0x01, 0xb8, 0x1a, 0x42, 0x88, 0x84, 0xa5, 0x7d, 0x23, + 0x8b, 0x72, 0x4f, 0x5a, 0x6b, 0x7b, 0x18, 0xe4, 0x9f, 0x55, 0x82, 0x7c, 0xbf, 0xfb, 0x1f, 0x74, + 0x4b, 0x0d, 0xf0, 0x9f, 0x8d, 0x05, 0xf8, 0xd3, 0x03, 0xf0, 0xee, 0x1d, 0xdc, 0x7f, 0x97, 0x43, + 0xe3, 0x80, 0x35, 0x6f, 0xb5, 0xea, 0x06, 0x2f, 0x85, 0x1e, 0x50, 0x2e, 0x09, 0xee, 0x8c, 0x5d, + 0x12, 0x4c, 0xc9, 0xb8, 0xd2, 0xf5, 0xc0, 0x95, 0x50, 0x51, 0x7f, 0x51, 0xce, 0xaa, 0xe2, 0x80, + 0xb2, 0xe7, 0xeb, 0x7b, 0x29, 0xe4, 0xa9, 0xaa, 0x07, 0xa7, 0x55, 0x5e, 0x43, 0x55, 0x6c, 0x6b, + 0xcd, 0x2f, 0xcc, 0x72, 0xfd, 0x17, 0x66, 0x87, 0x85, 0x2e, 0xbc, 0x38, 0x0b, 0x39, 0x11, 0x95, + 0x31, 0xbe, 0x86, 0x30, 0x1b, 0x58, 0x85, 0xc3, 0xb5, 0xe3, 0xcf, 0x8e, 0x89, 0x1b, 0xea, 0x5f, + 0x5c, 0x78, 0x6b, 0xb5, 0xdc, 0xc5, 0x8e, 0x24, 0x88, 0x60, 0xf7, 0x18, 0x36, 0xd5, 0x1d, 0xa8, + 0x5b, 0x87, 0xb9, 0xe9, 0xa2, 0xe3, 0x3e, 0x1f, 0x25, 0x02, 0x8a, 0xef, 0x43, 0xa3, 0x4d, 0xd8, + 0x27, 0xac, 0x3e, 0x1b, 0xe1, 0x88, 0xe1, 0xc9, 0x7b, 0xc5, 0x1f, 0x26, 0x01, 0x5c, 0xfb, 0x59, + 0x06, 0x8d, 0xc2, 0x42, 0xed, 0x47, 0xf2, 0x7b, 0x46, 0x4d, 0x7e, 0xa7, 0xfa, 0x77, 0xd0, 0x94, + 0xc4, 0xf7, 0x93, 0x1c, 0x9f, 0x03, 0x8f, 0xe1, 0x70, 0xe6, 0x69, 0xeb, 0xb6, 0x6e, 0x9a, 0xd4, + 0x34, 0x9c, 0xa6, 0x28, 0x1d, 0xf9, 0x99, 0xa7, 0x12, 0x0d, 0x13, 0x19, 0x87, 0x91, 0xd4, 0xac, + 0x66, 0xdb, 0xa4, 0xc1, 0x0b, 0x43, 0x48, 0x32, 0x1f, 0x0d, 0x13, 0x19, 0x07, 0x5f, 0x42, 0x87, + 0xf5, 0x9a, 0x6b, 0x6c, 0xd1, 0x05, 0xaa, 0xd7, 0x4d, 0xa3, 0x45, 0x83, 0xdb, 0xdc, 0x1c, 0x2f, + 0x21, 0x6f, 0x05, 0xe2, 0xc3, 0x73, 0x49, 0x08, 0x24, 0x99, 0x4e, 0xb9, 0x4e, 0x1f, 0xda, 0xc3, + 0xeb, 0xf4, 0x07, 0xd0, 0xb8, 0x0e, 0x27, 0xa3, 0x00, 0xc2, 0xfd, 0x28, 0x5f, 0x9e, 0x62, 0xa1, + 0x77, 0x4e, 0x1a, 0x27, 0x0a, 0x96, 0x72, 0x09, 0x3f, 0xb2, 0xdb, 0xcf, 0xa6, 0x3f, 0xcd, 0xa1, + 0x42, 0x18, 0x7c, 0xb0, 0x85, 0x50, 0x2d, 0xd8, 0xe0, 0xc1, 0xb5, 0xcf, 
0x23, 0xfd, 0x7b, 0x4a, + 0x18, 0x24, 0xa2, 0x78, 0x1c, 0x0e, 0x39, 0x44, 0x12, 0x01, 0x11, 0xb9, 0x00, 0x01, 0xc4, 0x76, + 0x07, 0x3d, 0xcb, 0x4d, 0x00, 0xef, 0x42, 0x35, 0xe0, 0x40, 0x22, 0x66, 0xb8, 0x01, 0x87, 0xe2, + 0xd0, 0x67, 0x06, 0x8d, 0x48, 0xfe, 0xe1, 0x57, 0x61, 0x43, 0x62, 0x6c, 0x59, 0x58, 0xf0, 0xbd, + 0x4a, 0x1c, 0xf0, 0xc2, 0xb0, 0xe0, 0xbb, 0x20, 0x11, 0x50, 0x3c, 0x0b, 0x53, 0xf5, 0x6a, 0x35, + 0x4a, 0xeb, 0xb4, 0x2e, 0x0e, 0x6e, 0x07, 0x05, 0x6a, 0xa1, 0x1a, 0x00, 0x48, 0x84, 0xc3, 0x18, + 0xaf, 0xeb, 0x06, 0x7b, 0x09, 0x1e, 0x51, 0x19, 0x2f, 0xf1, 0x51, 0x22, 0xa0, 0xda, 0x3f, 0xb2, + 0x68, 0x42, 0xf1, 0x3f, 0xfc, 0xe5, 0x0c, 0xbb, 0x48, 0x70, 0x6b, 0x1b, 0x7c, 0x38, 0x58, 0xc8, + 0x95, 0x9d, 0xf8, 0x74, 0x69, 0x25, 0xe2, 0xe7, 0x3f, 0xd5, 0x49, 0xf7, 0x12, 0x21, 0x84, 0xc8, + 0x62, 0xf1, 0xeb, 0x50, 0xe0, 0xf2, 0xef, 0xc5, 0xed, 0x36, 0xab, 0x1c, 0xa4, 0x27, 0xc4, 0x73, + 0x3b, 0xd1, 0x85, 0xd0, 0x57, 0x3c, 0x38, 0x29, 0xf3, 0xfb, 0xe8, 0xb0, 0xd0, 0x5d, 0x89, 0x09, + 0x22, 0x5d, 0xa2, 0x67, 0xce, 0xa2, 0xa9, 0xf8, 0x2c, 0xfa, 0x7a, 0xb2, 0xfb, 0x4e, 0x06, 0x15, + 0xd3, 0x14, 0x61, 0xa7, 0xd8, 0x90, 0x51, 0x54, 0x1d, 0x3e, 0x45, 0x3b, 0x3e, 0xd7, 0x45, 0x94, + 0xb7, 0xda, 0xec, 0x16, 0x43, 0xbc, 0xd8, 0x15, 0xca, 0xf7, 0x05, 0xbb, 0xf2, 0x92, 0x18, 0x87, + 0xdc, 0x7b, 0x58, 0x61, 0x1f, 0x00, 0x48, 0x48, 0x8a, 0x35, 0x34, 0xc2, 0xf5, 0xf1, 0xab, 0xcc, + 0x42, 0x19, 0x31, 0x7f, 0xe0, 0xf5, 0x35, 0xa4, 0x62, 0x1f, 0xa2, 0x7d, 0x1f, 0x0a, 0x6b, 0x96, + 0x00, 0x82, 0x7b, 0xf1, 0x47, 0x58, 0x6a, 0x96, 0xd8, 0x0a, 0x1d, 0xa5, 0x6c, 0x2b, 0x4f, 0x49, + 0xc5, 0x65, 0xc4, 0xeb, 0x06, 0x35, 0xeb, 0x55, 0xf9, 0xb9, 0x51, 0x22, 0x5e, 0x92, 0x81, 0x44, + 0xc5, 0x65, 0x37, 0xfa, 0xd7, 0x98, 0xc1, 0xf9, 0xd6, 0x93, 0x6e, 0xf4, 0x9f, 0x61, 0x83, 0xc4, + 0x87, 0xb1, 0x9b, 0x92, 0xe0, 0x62, 0x4d, 0xb4, 0xd8, 0xf1, 0x8d, 0x54, 0x88, 0x6e, 0x4a, 0x88, + 0x0a, 0x26, 0x71, 0x7c, 0x7c, 0x06, 0x4d, 0xb2, 0x5e, 0x3f, 0xcb, 0x73, 0xe5, 0x77, 0xbd, 0x9c, + 0xbf, 0x7d, 0x57, 0x15, 0x08, 0x89, 0x61, 0xf2, 0xf6, 0x9e, 0x8b, 0xd4, 0xbd, 0x66, 0xd9, 0x9b, + 0x15, 0xcb, 0x34, 0x6a, 0x9d, 0x3d, 0xac, 0x3f, 0xd7, 0x94, 0xfa, 0xf3, 0x89, 0x3e, 0xf7, 0x80, + 0xa2, 0x65, 0x5a, 0x25, 0xaa, 0xfd, 0x1d, 0x9c, 0x54, 0xc1, 0x94, 0x0f, 0xa5, 0x14, 0x0d, 0xb3, + 0xa7, 0x96, 0x20, 0x22, 0xec, 0x48, 0x03, 0x76, 0x82, 0x97, 0xee, 0xf4, 0x19, 0x5b, 0xe2, 0x73, + 0x67, 0xf3, 0x5c, 0xb7, 0xad, 0xa6, 0xd8, 0xeb, 0x3b, 0x93, 0x42, 0xa9, 0x1d, 0xcd, 0x73, 0x09, + 0xb8, 0x12, 0xce, 0x5b, 0xfb, 0x43, 0x06, 0x1d, 0x54, 0x30, 0xf7, 0xa3, 0x88, 0xd2, 0xd5, 0x22, + 0xea, 0xd1, 0x9d, 0xcc, 0x2c, 0xa5, 0x9c, 0xfa, 0x6a, 0x36, 0x36, 0x2f, 0x66, 0x01, 0x48, 0xcc, + 0x63, 0x6d, 0xab, 0x5e, 0xdd, 0xcd, 0x2e, 0x2d, 0xbf, 0x2c, 0x8b, 0x98, 0x12, 0x59, 0x02, 0xfe, + 0x22, 0x98, 0x97, 0x75, 0x82, 0x38, 0x6d, 0xbd, 0x46, 0xab, 0xbb, 0xd9, 0x6b, 0x70, 0x98, 0x3d, + 0xb6, 0x5e, 0x8c, 0xb3, 0x26, 0xdd, 0xd2, 0xb4, 0x1f, 0xc7, 0x97, 0x98, 0x39, 0x19, 0x7e, 0x1a, + 0xe5, 0x79, 0x5b, 0x6e, 0xcd, 0x32, 0x45, 0x24, 0x7b, 0x90, 0xad, 0x56, 0x45, 0x8c, 0x41, 0x14, + 0xfd, 0x58, 0xcf, 0x13, 0x4c, 0x80, 0x48, 0x42, 0x36, 0x78, 0x19, 0x0d, 0xb5, 0x07, 0xbf, 0xe7, + 0xe2, 0x17, 0x1b, 0xfc, 0x72, 0x8b, 0x73, 0xd1, 0xfe, 0x1d, 0x57, 0x9b, 0x97, 0xc6, 0xce, 0xee, + 0xaf, 0x60, 0x98, 0x81, 0x53, 0x57, 0xd1, 0x46, 0xa3, 0xe2, 0xba, 0x67, 0xc0, 0xbc, 0x9b, 0x16, + 0x49, 0xa2, 0x33, 0x4d, 0x30, 0x18, 0x08, 0xe2, 0x1b, 0x93, 0x2b, 0x54, 0xf3, 0x6c, 0xc3, 0xed, + 0xec, 0x79, 0x50, 0x5d, 0x57, 0x82, 0xea, 0x42, 0x9f, 0x13, 0xec, 0xd2, 0x34, 0x35, 0xb0, 0xfe, + 
0x39, 0x83, 0x0e, 0x77, 0x61, 0xef, 0x47, 0xd0, 0xa1, 0x6a, 0xd0, 0x79, 0x62, 0xa7, 0x33, 0x4c, + 0x09, 0x3c, 0xd7, 0x51, 0xc2, 0xfc, 0xb8, 0xeb, 0x9e, 0x42, 0xa8, 0x6d, 0x1b, 0x5b, 0x50, 0x6c, + 0x36, 0x44, 0xeb, 0x64, 0x3e, 0x5a, 0x93, 0x4a, 0x08, 0x21, 0x12, 0x16, 0xfe, 0x3c, 0x6b, 0x6b, + 0x5c, 0xd7, 0x3d, 0xd3, 0x9d, 0xab, 0xd7, 0xe7, 0xf5, 0xb6, 0xbe, 0x66, 0x98, 0x50, 0xf3, 0x8b, + 0x37, 0xf6, 0x42, 0x79, 0xd1, 0x6f, 0x69, 0x4c, 0xc2, 0x80, 0x1d, 0xfc, 0xf1, 0xde, 0x77, 0x10, + 0x01, 0x72, 0x87, 0xa4, 0x08, 0xc1, 0x5f, 0x82, 0x2c, 0x68, 0xfb, 0xd5, 0x59, 0x7d, 0xc1, 0xb6, + 0xda, 0x8a, 0x06, 0x7e, 0xe9, 0x74, 0x0e, 0x34, 0x28, 0x92, 0x14, 0x9c, 0x7e, 0x74, 0x48, 0x15, + 0x84, 0x5d, 0x74, 0x08, 0xce, 0xb9, 0xd6, 0x35, 0xaa, 0x5a, 0x60, 0x88, 0xcb, 0x2f, 0xb3, 0x4e, + 0x91, 0xb9, 0x6e, 0x70, 0x3f, 0xa2, 0x93, 0xd8, 0xc3, 0x41, 0x63, 0x74, 0xcb, 0x32, 0x3d, 0x88, + 0xa6, 0x50, 0x06, 0x31, 0x49, 0x2c, 0xe2, 0x8e, 0x5e, 0xf1, 0x87, 0x3e, 0x62, 0xe7, 0x87, 0x2a, + 0xbf, 0x10, 0x0a, 0xb0, 0xd8, 0x23, 0x01, 0xbb, 0xb5, 0x13, 0x7b, 0x9d, 0x9f, 0x36, 0xf2, 0x51, + 0x70, 0x39, 0x1f, 0x81, 0x88, 0x8c, 0x87, 0x9b, 0xa8, 0xb0, 0x21, 0x1e, 0x8c, 0x9c, 0xe2, 0xe8, + 0x40, 0x09, 0x51, 0x79, 0x70, 0x8a, 0x8e, 0x43, 0xc1, 0xb0, 0x43, 0x22, 0x09, 0xec, 0x5a, 0x85, + 0x7f, 0x5c, 0x58, 0xe0, 0x1d, 0x50, 0xf9, 0x28, 0x04, 0x9d, 0xf7, 0x87, 0x49, 0x00, 0x0f, 0x50, + 0x2f, 0x54, 0xe6, 0x8b, 0x85, 0x6e, 0x54, 0x18, 0x26, 0x01, 0x1c, 0xb7, 0xd1, 0xa8, 0x43, 0x97, + 0x8d, 0x96, 0xb7, 0x5d, 0x44, 0x7c, 0xeb, 0x2e, 0xf6, 0xfb, 0x2e, 0xbc, 0xc8, 0xa9, 0x63, 0xcd, + 0x28, 0x91, 0x44, 0x01, 0x27, 0x81, 0x18, 0xbc, 0x8d, 0x0a, 0xb6, 0xd7, 0x9a, 0x73, 0x2e, 0x3b, + 0xd4, 0x2e, 0x8e, 0x71, 0x99, 0xfd, 0x46, 0x65, 0x12, 0xd0, 0xc7, 0xa5, 0x86, 0x16, 0x0c, 0x31, + 0x48, 0x24, 0x0c, 0x7f, 0x33, 0x83, 0xb0, 0xe3, 0xb5, 0xe1, 0xf0, 0xca, 0x4e, 0x2c, 0xba, 0xc9, + 0xfb, 0x61, 0x9c, 0xe2, 0x38, 0xd7, 0xa1, 0xd2, 0xf7, 0x7b, 0x78, 0x9c, 0x51, 0x5c, 0x99, 0xf0, + 0x7e, 0xad, 0x1b, 0x95, 0x24, 0xe8, 0xc1, 0x96, 0x62, 0xdd, 0xe1, 0xff, 0x2f, 0x4e, 0x0c, 0xb4, + 0x14, 0xc9, 0x7d, 0x41, 0xd1, 0x52, 0x08, 0x38, 0x09, 0xc4, 0xb0, 0xde, 0x6b, 0x9b, 0xea, 0xf5, + 0x4b, 0x2d, 0xb3, 0x43, 0x2c, 0xcb, 0x5d, 0x82, 0xd8, 0xe5, 0x74, 0x1c, 0x88, 0x86, 0xc5, 0x49, + 0xee, 0x36, 0x61, 0xef, 0x35, 0x49, 0xc4, 0x22, 0x29, 0xd4, 0xbc, 0xf7, 0x5a, 0x3c, 0xe3, 0xee, + 0xed, 0x6f, 0x2c, 0x76, 0xd6, 0x7b, 0x1d, 0xa9, 0xb8, 0x67, 0xbd, 0xd7, 0x92, 0x88, 0x1b, 0xf7, + 0x5e, 0x47, 0xc8, 0xff, 0x03, 0xbd, 0xd7, 0x91, 0xb2, 0x29, 0xf9, 0xf4, 0x3f, 0xca, 0x8c, 0xfe, + 0x0f, 0x1b, 0x7c, 0xd9, 0x83, 0xce, 0x54, 0xdc, 0x01, 0x94, 0xde, 0xcf, 0xcc, 0x0d, 0x7b, 0x3f, + 0x2b, 0x68, 0x7a, 0xdd, 0x33, 0xcd, 0x0e, 0x9f, 0x8d, 0xd4, 0x4b, 0xe1, 0x5f, 0x13, 0xdf, 0x2e, + 0x28, 0xa7, 0x97, 0x12, 0x70, 0x48, 0x22, 0x65, 0x4a, 0x1f, 0x6b, 0x6e, 0xa0, 0x3e, 0xd6, 0x47, + 0xd0, 0x04, 0x8b, 0x00, 0x9d, 0x58, 0x8b, 0x47, 0x78, 0x39, 0x42, 0x64, 0x20, 0x51, 0x71, 0xb5, + 0xdb, 0xd1, 0x8c, 0xf8, 0x3f, 0xe3, 0x35, 0x6f, 0xb5, 0x5c, 0xd6, 0x47, 0x49, 0xed, 0x05, 0xaf, + 0xd9, 0xec, 0x68, 0x67, 0xc1, 0x79, 0x94, 0x8e, 0x5d, 0xdf, 0x70, 0x7e, 0x13, 0xb1, 0x68, 0xa5, + 0x90, 0x0c, 0xe7, 0x8f, 0x93, 0x10, 0x43, 0x7b, 0x3f, 0x83, 0x8e, 0xa6, 0xf4, 0x70, 0xe2, 0xab, + 0x68, 0xb2, 0xa9, 0x6f, 0x4b, 0x4d, 0xaa, 0x62, 0x7b, 0xf5, 0x7b, 0xf0, 0xe1, 0xd7, 0x2b, 0x2b, + 0x0a, 0x27, 0x12, 0xe3, 0xcc, 0x63, 0x9f, 0xbe, 0x5d, 0xf5, 0xec, 0x06, 0x1d, 0xf0, 0x78, 0xc5, + 0x5d, 0x77, 0x45, 0xf0, 0x20, 0x21, 0x37, 0xd6, 0x09, 0x5a, 0x4c, 0x4b, 0x84, 0x50, 0xd2, 0xc8, + 0xbd, 0xa0, 0x77, 0xc5, 
0x7a, 0x41, 0x0f, 0x76, 0xd1, 0xed, 0x53, 0x27, 0xe8, 0x3b, 0x19, 0x74, + 0x24, 0xb9, 0x60, 0xc0, 0x9f, 0x52, 0x34, 0x3e, 0x1e, 0xd3, 0xf8, 0x40, 0x8c, 0x4a, 0xe8, 0xbb, + 0x81, 0x26, 0x45, 0x59, 0x21, 0xd8, 0xdc, 0xc4, 0x0f, 0x16, 0xb7, 0xc2, 0x9a, 0x25, 0x48, 0x90, + 0x7c, 0x1d, 0xd5, 0x31, 0x12, 0xe3, 0xab, 0x7d, 0x3b, 0x8b, 0x86, 0x79, 0x83, 0xd4, 0x1e, 0x66, + 0xb3, 0xe7, 0x94, 0x6c, 0xd6, 0xef, 0x13, 0x2a, 0xd7, 0x2e, 0x35, 0x91, 0xad, 0xc5, 0x12, 0xd9, + 0x99, 0x81, 0xb8, 0xf7, 0xce, 0x61, 0x0f, 0xa3, 0x42, 0xa8, 0x44, 0x7f, 0x71, 0x8e, 0x55, 0x0c, + 0x63, 0x92, 0x88, 0x3e, 0xa3, 0xe4, 0x96, 0x92, 0x2d, 0x06, 0xf9, 0x65, 0xad, 0x24, 0xbb, 0x14, + 0xa4, 0x09, 0xff, 0x9a, 0x3f, 0x6a, 0x71, 0xec, 0xce, 0x1e, 0x10, 0xa4, 0xfc, 0x9f, 0x27, 0x87, + 0xd7, 0x1a, 0x39, 0xee, 0xbd, 0x47, 0x04, 0xcd, 0xe4, 0xaa, 0x02, 0x25, 0x31, 0xec, 0x19, 0x88, + 0x9f, 0x8a, 0xb0, 0xbe, 0x6e, 0xe3, 0x7f, 0x91, 0x41, 0xd3, 0x49, 0x4d, 0x99, 0xac, 0xf3, 0x66, + 0xd3, 0x10, 0x5d, 0x24, 0x52, 0xe7, 0xcd, 0x53, 0x30, 0x46, 0x38, 0x24, 0xfc, 0xfd, 0x53, 0x36, + 0xf5, 0xf7, 0x4f, 0x70, 0xe4, 0x05, 0x53, 0x05, 0xf7, 0xd1, 0x39, 0xb5, 0x7f, 0x21, 0xfa, 0x31, + 0x38, 0x91, 0xb0, 0x78, 0xaf, 0x55, 0xa4, 0x8f, 0xb8, 0xc4, 0x8e, 0x9a, 0xa0, 0x24, 0x55, 0x65, + 0x3c, 0xed, 0x97, 0x19, 0x74, 0xd7, 0x0d, 0x8b, 0x69, 0x5c, 0x56, 0xc2, 0x43, 0x29, 0x16, 0x1e, + 0x8e, 0xa5, 0x33, 0xd8, 0xc7, 0x3e, 0xf7, 0x37, 0xb2, 0x08, 0xaf, 0x6e, 0x18, 0x76, 0xbd, 0xa2, + 0xdb, 0x70, 0x38, 0x15, 0x13, 0xdc, 0xc3, 0x80, 0x01, 0x16, 0xaf, 0x53, 0xa7, 0x66, 0x1b, 0xdc, + 0x48, 0x62, 0x39, 0x43, 0x8b, 0x2f, 0x44, 0x20, 0x22, 0xe3, 0x41, 0x51, 0x9b, 0x17, 0xb5, 0x62, + 0xd0, 0xac, 0xd3, 0x6f, 0xf1, 0x17, 0x79, 0x40, 0xb4, 0x3f, 0xc4, 0x00, 0xec, 0xcb, 0x80, 0xb9, + 0xf6, 0x26, 0x84, 0xfb, 0x6e, 0x83, 0x2c, 0xf8, 0xad, 0x28, 0x7b, 0x65, 0x94, 0xdb, 0xd1, 0x10, + 0xe7, 0xca, 0xac, 0x31, 0xee, 0x5f, 0x4e, 0x32, 0x89, 0x84, 0x8f, 0x6a, 0x1f, 0x66, 0xd0, 0x4c, + 0xb2, 0x4a, 0xfb, 0x51, 0x73, 0x5f, 0x55, 0x6b, 0xee, 0x7e, 0x0f, 0x78, 0xc9, 0x8a, 0xa7, 0xd4, + 0xdf, 0xef, 0x27, 0x1a, 0x7f, 0x3f, 0x66, 0xb9, 0xae, 0xce, 0x72, 0x6e, 0xc7, 0xb3, 0x4c, 0x9e, + 0x61, 0xf9, 0xbe, 0xeb, 0x7f, 0x3d, 0x76, 0xcb, 0xbb, 0xf0, 0xf7, 0x27, 0xf8, 0x7b, 0xed, 0x83, + 0x63, 0x99, 0xeb, 0xf0, 0xf7, 0x2e, 0xfc, 0xfd, 0x05, 0xfe, 0xde, 0xfc, 0xdb, 0xb1, 0x5b, 0x9e, + 0x1b, 0x15, 0x3c, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x58, 0xd8, 0xcd, 0xc3, 0x5b, 0x44, 0x00, + 0x00, } diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/generated.proto index 494fee02ef38..6a343f130b9c 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/generated.proto @@ -70,25 +70,25 @@ message CustomMetricTargetList { // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; // Spec defines the desired behavior of this daemon set. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional DaemonSetSpec spec = 2; // Status is the current status of this daemon set. 
This data may be // out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional DaemonSetStatus status = 3; } // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is a list of daemon sets. @@ -100,14 +100,14 @@ message DaemonSetSpec { // Selector is a label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors optional LabelSelector selector = 1; // Template is the object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 2; } @@ -115,17 +115,17 @@ message DaemonSetSpec { message DaemonSetStatus { // CurrentNumberScheduled is the number of nodes that are running at least 1 // daemon pod and are supposed to run the daemon pod. - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: http://releases.k8s.io/release-1.4/docs/admin/daemons.md optional int32 currentNumberScheduled = 1; // NumberMisscheduled is the number of nodes that are running the daemon pod, but are // not supposed to run the daemon pod. - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: http://releases.k8s.io/release-1.4/docs/admin/daemons.md optional int32 numberMisscheduled = 2; // DesiredNumberScheduled is the total number of nodes that should be running the daemon // pod (including nodes correctly running the daemon pod). - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: http://releases.k8s.io/release-1.4/docs/admin/daemons.md optional int32 desiredNumberScheduled = 3; } @@ -274,10 +274,10 @@ message HTTPIngressRuleValue { // configuration of a horizontal pod autoscaler. message HorizontalPodAutoscaler { - // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. optional HorizontalPodAutoscalerSpec spec = 2; // current information about the autoscaler. @@ -355,15 +355,15 @@ message IDRange { // based virtual hosting etc. message Ingress { // Standard object's metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; // Spec is the desired state of the Ingress. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional IngressSpec spec = 2; // Status is the current state of the Ingress. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional IngressStatus status = 3; } @@ -379,7 +379,7 @@ message IngressBackend { // IngressList is a collection of Ingress. message IngressList { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is the list of Ingress. @@ -465,15 +465,15 @@ message IngressTLS { // Job represents the configuration of a single job. message Job { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional JobSpec spec = 2; // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional JobStatus status = 3; } @@ -501,7 +501,7 @@ message JobCondition { // JobList is a collection of jobs. message JobList { // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is the list of Job. @@ -514,7 +514,7 @@ message JobSpec { // run at any given time. The actual number of pods running in steady state will // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md optional int32 parallelism = 1; // Completions specifies the desired number of successfully finished pods the @@ -522,7 +522,7 @@ message JobSpec { // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md optional int32 completions = 2; // Optional duration in seconds relative to the startTime that the job may be active @@ -531,26 +531,26 @@ message JobSpec { // Selector is a label query over pods that should match the pod count. // Normally, the system sets this field for you. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors optional LabelSelector selector = 4; // AutoSelector controls generation of pod labels and pod selectors. // It was not present in the original extensions/v1beta1 Job definition, but exists // to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite // meaning as, ManualSelector. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + // More info: http://releases.k8s.io/release-1.4/docs/design/selector-generation.md optional bool autoSelector = 5; // Template is the object that describes the pod that will be created when // executing a job. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 6; } // JobStatus represents the current state of a Job. message JobStatus { // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md repeated JobCondition conditions = 1; // StartTime represents time when the job was acknowledged by the Job Manager. @@ -627,7 +627,7 @@ message ListOptions { message NetworkPolicy { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; // Specification of the desired behavior for this NetworkPolicy. @@ -658,7 +658,7 @@ message NetworkPolicyIngressRule { // Network Policy List is a list of NetworkPolicy objects. message NetworkPolicyList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is a list of schema objects. @@ -716,7 +716,7 @@ message NetworkPolicySpec { // that will be applied to a pod and container. message PodSecurityPolicy { // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; // spec defines the policy enforced. @@ -726,7 +726,7 @@ message PodSecurityPolicy { // Pod Security Policy List is a list of PodSecurityPolicy objects. message PodSecurityPolicyList { // Standard list metadata. 
- // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is a list of schema objects. @@ -792,29 +792,29 @@ message PodSecurityPolicySpec { message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional ReplicaSetSpec spec = 2; // Status is the most recently observed status of the ReplicaSet. // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status optional ReplicaSetStatus status = 3; } // ReplicaSetList is a collection of ReplicaSets. message ReplicaSetList { // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // List of ReplicaSets. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md repeated ReplicaSet items = 2; } @@ -823,25 +823,25 @@ message ReplicaSetSpec { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller optional int32 replicas = 1; // Selector is a label query over pods that should match the replica count. // If the selector is empty, it is defaulted to the labels present on the pod template. // Label keys and values that must match in order to be controlled by this replica set. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors optional LabelSelector selector = 2; // Template is the object that describes the pod that will be created if // insufficient replicas are detected. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template optional k8s.io.kubernetes.pkg.api.v1.PodTemplateSpec template = 3; } // ReplicaSetStatus represents the current status of a ReplicaSet. message ReplicaSetStatus { // Replicas is the most recently oberved number of replicas. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller optional int32 replicas = 1; // The number of pods that have labels matching the labels of the pod template of the replicaset. @@ -906,19 +906,19 @@ message SELinuxStrategyOptions { optional string rule = 1; // seLinuxOptions required to run as; required for MustRunAs - // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context + // More info: http://releases.k8s.io/release-1.4/docs/design/security_context.md#security-context optional k8s.io.kubernetes.pkg.api.v1.SELinuxOptions seLinuxOptions = 2; } // represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata. optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. optional ScaleSpec spec = 2; - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Read-only. optional ScaleStatus status = 3; } @@ -933,7 +933,7 @@ message ScaleStatus { // actual number of observed instances of the scaled object. optional int32 replicas = 1; - // label query over pods that should match the replicas count. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // label query over pods that should match the replicas count. More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors map selector = 2; // label selector for pods that should match the replicas count. This is a serializated @@ -941,44 +941,16 @@ message ScaleStatus { // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this // field and map-based selector field are populated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors optional string targetSelector = 3; } -// StorageClass describes the parameters for a class of storage for -// which PersistentVolumes can be dynamically provisioned. -// -// StorageClasses are non-namespaced; the name of the storage class -// according to etcd is in ObjectMeta.Name. -message StorageClass { - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Provisioner indicates the type of the provisioner. - optional string provisioner = 2; - - // Parameters holds the parameters for the provisioner that should - // create volumes of this storage class. - map parameters = 3; -} - -// StorageClassList is a collection of storage classes. 
-message StorageClassList { - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // Items is the list of StorageClasses - repeated StorageClass items = 2; -} - // SubresourceReference contains enough information to let you inspect or modify the referred subresource. message SubresourceReference { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // Kind of the referent; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds optional string kind = 1; - // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // Name of the referent; More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names optional string name = 2; // API version of the referent @@ -1023,7 +995,7 @@ message ThirdPartyResourceData { // ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. message ThirdPartyResourceDataList { // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; // Items is the list of ThirdpartyResourceData. diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/register.go b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/register.go index 3ca70ed60f96..91ea9d0bcfc6 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/register.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/register.go @@ -62,8 +62,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &PodSecurityPolicyList{}, &NetworkPolicy{}, &NetworkPolicyList{}, - &StorageClass{}, - &StorageClassList{}, ) // Add the watch version that applies versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/types.generated.go b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/types.generated.go index 1ff75ce73d6c..40522c05fa10 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/types.generated.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/types.generated.go @@ -21048,727 +21048,15 @@ func (x *NetworkPolicyList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder z.DecSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1759 := z.EncBinary() - _ = yym1759 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1760 := !z.EncBinary() - yy2arr1760 := z.EncBasicHandle().StructToArray - var yyq1760 [5]bool - _, _, _ = yysep1760, yyq1760, yy2arr1760 - const yyr1760 bool = false - yyq1760[0] = x.Kind != "" - yyq1760[1] = x.APIVersion != "" - yyq1760[2] = true - yyq1760[4] = len(x.Parameters) != 0 - var yynn1760 int - if yyr1760 || yy2arr1760 { - r.EncodeArrayStart(5) - } else { - yynn1760 = 1 - for _, b := range yyq1760 { - if b { - yynn1760++ - } - } - r.EncodeMapStart(yynn1760) - yynn1760 = 0 - } - if yyr1760 || yy2arr1760 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1760[0] { - yym1762 := z.EncBinary() - _ = yym1762 - if false { - } else { 
- r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1760[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1763 := z.EncBinary() - _ = yym1763 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr1760 || yy2arr1760 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1760[1] { - yym1765 := z.EncBinary() - _ = yym1765 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1760[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1766 := z.EncBinary() - _ = yym1766 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr1760 || yy2arr1760 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1760[2] { - yy1768 := &x.ObjectMeta - yy1768.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq1760[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1769 := &x.ObjectMeta - yy1769.CodecEncodeSelf(e) - } - } - if yyr1760 || yy2arr1760 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1771 := z.EncBinary() - _ = yym1771 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("provisioner")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1772 := z.EncBinary() - _ = yym1772 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) - } - } - if yyr1760 || yy2arr1760 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1760[4] { - if x.Parameters == nil { - r.EncodeNil() - } else { - yym1774 := z.EncBinary() - _ = yym1774 - if false { - } else { - z.F.EncMapStringStringV(x.Parameters, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq1760[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("parameters")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Parameters == nil { - r.EncodeNil() - } else { - yym1775 := z.EncBinary() - _ = yym1775 - if false { - } else { - z.F.EncMapStringStringV(x.Parameters, false, e) - } - } - } - } - if yyr1760 || yy2arr1760 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StorageClass) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1776 := z.DecBinary() - _ = yym1776 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1777 := r.ContainerType() - if yyct1777 == codecSelferValueTypeMap1234 { - yyl1777 := r.ReadMapStart() - if yyl1777 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1777, d) - } - } else if yyct1777 == 
codecSelferValueTypeArray1234 { - yyl1777 := r.ReadArrayStart() - if yyl1777 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1777, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StorageClass) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1778Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1778Slc - var yyhl1778 bool = l >= 0 - for yyj1778 := 0; ; yyj1778++ { - if yyhl1778 { - if yyj1778 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1778Slc = r.DecodeBytes(yys1778Slc, true, true) - yys1778 := string(yys1778Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1778 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv1781 := &x.ObjectMeta - yyv1781.CodecDecodeSelf(d) - } - case "provisioner": - if r.TryDecodeAsNil() { - x.Provisioner = "" - } else { - x.Provisioner = string(r.DecodeString()) - } - case "parameters": - if r.TryDecodeAsNil() { - x.Parameters = nil - } else { - yyv1783 := &x.Parameters - yym1784 := z.DecBinary() - _ = yym1784 - if false { - } else { - z.F.DecMapStringStringX(yyv1783, false, d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1778) - } // end switch yys1778 - } // end for yyj1778 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1785 int - var yyb1785 bool - var yyhl1785 bool = l >= 0 - yyj1785++ - if yyhl1785 { - yyb1785 = yyj1785 > l - } else { - yyb1785 = r.CheckBreak() - } - if yyb1785 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj1785++ - if yyhl1785 { - yyb1785 = yyj1785 > l - } else { - yyb1785 = r.CheckBreak() - } - if yyb1785 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj1785++ - if yyhl1785 { - yyb1785 = yyj1785 > l - } else { - yyb1785 = r.CheckBreak() - } - if yyb1785 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg2_v1.ObjectMeta{} - } else { - yyv1788 := &x.ObjectMeta - yyv1788.CodecDecodeSelf(d) - } - yyj1785++ - if yyhl1785 { - yyb1785 = yyj1785 > l - } else { - yyb1785 = r.CheckBreak() - } - if yyb1785 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Provisioner = "" - } else { - x.Provisioner = string(r.DecodeString()) - } - yyj1785++ - if yyhl1785 { - yyb1785 = yyj1785 > l - } else { - yyb1785 = r.CheckBreak() - } - 
if yyb1785 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Parameters = nil - } else { - yyv1790 := &x.Parameters - yym1791 := z.DecBinary() - _ = yym1791 - if false { - } else { - z.F.DecMapStringStringX(yyv1790, false, d) - } - } - for { - yyj1785++ - if yyhl1785 { - yyb1785 = yyj1785 > l - } else { - yyb1785 = r.CheckBreak() - } - if yyb1785 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1785-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1792 := z.EncBinary() - _ = yym1792 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep1793 := !z.EncBinary() - yy2arr1793 := z.EncBasicHandle().StructToArray - var yyq1793 [4]bool - _, _, _ = yysep1793, yyq1793, yy2arr1793 - const yyr1793 bool = false - yyq1793[0] = x.Kind != "" - yyq1793[1] = x.APIVersion != "" - yyq1793[2] = true - var yynn1793 int - if yyr1793 || yy2arr1793 { - r.EncodeArrayStart(4) - } else { - yynn1793 = 1 - for _, b := range yyq1793 { - if b { - yynn1793++ - } - } - r.EncodeMapStart(yynn1793) - yynn1793 = 0 - } - if yyr1793 || yy2arr1793 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1793[0] { - yym1795 := z.EncBinary() - _ = yym1795 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1793[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1796 := z.EncBinary() - _ = yym1796 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr1793 || yy2arr1793 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1793[1] { - yym1798 := z.EncBinary() - _ = yym1798 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq1793[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym1799 := z.EncBinary() - _ = yym1799 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr1793 || yy2arr1793 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq1793[2] { - yy1801 := &x.ListMeta - yym1802 := z.EncBinary() - _ = yym1802 - if false { - } else if z.HasExtensions() && z.EncExt(yy1801) { - } else { - z.EncFallback(yy1801) - } - } else { - r.EncodeNil() - } - } else { - if yyq1793[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy1803 := &x.ListMeta - yym1804 := z.EncBinary() - _ = yym1804 - if false { - } else if z.HasExtensions() && z.EncExt(yy1803) { - } else { - z.EncFallback(yy1803) - } - } - } - if yyr1793 || yy2arr1793 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym1806 := 
z.EncBinary() - _ = yym1806 - if false { - } else { - h.encSliceStorageClass(([]StorageClass)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym1807 := z.EncBinary() - _ = yym1807 - if false { - } else { - h.encSliceStorageClass(([]StorageClass)(x.Items), e) - } - } - } - if yyr1793 || yy2arr1793 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *StorageClassList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym1808 := z.DecBinary() - _ = yym1808 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct1809 := r.ContainerType() - if yyct1809 == codecSelferValueTypeMap1234 { - yyl1809 := r.ReadMapStart() - if yyl1809 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl1809, d) - } - } else if yyct1809 == codecSelferValueTypeArray1234 { - yyl1809 := r.ReadArrayStart() - if yyl1809 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl1809, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *StorageClassList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys1810Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys1810Slc - var yyhl1810 bool = l >= 0 - for yyj1810 := 0; ; yyj1810++ { - if yyhl1810 { - if yyj1810 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys1810Slc = r.DecodeBytes(yys1810Slc, true, true) - yys1810 := string(yys1810Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys1810 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv1813 := &x.ListMeta - yym1814 := z.DecBinary() - _ = yym1814 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1813) { - } else { - z.DecFallback(yyv1813, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv1815 := &x.Items - yym1816 := z.DecBinary() - _ = yym1816 - if false { - } else { - h.decSliceStorageClass((*[]StorageClass)(yyv1815), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys1810) - } // end switch yys1810 - } // end for yyj1810 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj1817 int - var yyb1817 bool - var yyhl1817 bool = l >= 0 - yyj1817++ - if yyhl1817 { - yyb1817 = yyj1817 > l - } else { - yyb1817 = r.CheckBreak() - } - if yyb1817 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = 
string(r.DecodeString()) - } - yyj1817++ - if yyhl1817 { - yyb1817 = yyj1817 > l - } else { - yyb1817 = r.CheckBreak() - } - if yyb1817 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj1817++ - if yyhl1817 { - yyb1817 = yyj1817 > l - } else { - yyb1817 = r.CheckBreak() - } - if yyb1817 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg1_unversioned.ListMeta{} - } else { - yyv1820 := &x.ListMeta - yym1821 := z.DecBinary() - _ = yym1821 - if false { - } else if z.HasExtensions() && z.DecExt(yyv1820) { - } else { - z.DecFallback(yyv1820, false) - } - } - yyj1817++ - if yyhl1817 { - yyb1817 = yyj1817 > l - } else { - yyb1817 = r.CheckBreak() - } - if yyb1817 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv1822 := &x.Items - yym1823 := z.DecBinary() - _ = yym1823 - if false { - } else { - h.decSliceStorageClass((*[]StorageClass)(yyv1822), d) - } - } - for { - yyj1817++ - if yyhl1817 { - yyb1817 = yyj1817 > l - } else { - yyb1817 = r.CheckBreak() - } - if yyb1817 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj1817-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - func (x codecSelfer1234) encSliceCustomMetricTarget(v []CustomMetricTarget, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1824 := range v { + for _, yyv1759 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1825 := &yyv1824 - yy1825.CodecEncodeSelf(e) + yy1760 := &yyv1759 + yy1760.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -21778,83 +21066,83 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1826 := *v - yyh1826, yyl1826 := z.DecSliceHelperStart() - var yyc1826 bool - if yyl1826 == 0 { - if yyv1826 == nil { - yyv1826 = []CustomMetricTarget{} - yyc1826 = true - } else if len(yyv1826) != 0 { - yyv1826 = yyv1826[:0] - yyc1826 = true + yyv1761 := *v + yyh1761, yyl1761 := z.DecSliceHelperStart() + var yyc1761 bool + if yyl1761 == 0 { + if yyv1761 == nil { + yyv1761 = []CustomMetricTarget{} + yyc1761 = true + } else if len(yyv1761) != 0 { + yyv1761 = yyv1761[:0] + yyc1761 = true } - } else if yyl1826 > 0 { - var yyrr1826, yyrl1826 int - var yyrt1826 bool - if yyl1826 > cap(yyv1826) { + } else if yyl1761 > 0 { + var yyrr1761, yyrl1761 int + var yyrt1761 bool + if yyl1761 > cap(yyv1761) { - yyrg1826 := len(yyv1826) > 0 - yyv21826 := yyv1826 - yyrl1826, yyrt1826 = z.DecInferLen(yyl1826, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1826 { - if yyrl1826 <= cap(yyv1826) { - yyv1826 = yyv1826[:yyrl1826] + yyrg1761 := len(yyv1761) > 0 + yyv21761 := yyv1761 + yyrl1761, yyrt1761 = z.DecInferLen(yyl1761, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1761 { + if yyrl1761 <= cap(yyv1761) { + yyv1761 = yyv1761[:yyrl1761] } else { - yyv1826 = make([]CustomMetricTarget, yyrl1826) + yyv1761 = make([]CustomMetricTarget, yyrl1761) } } else { - yyv1826 = 
make([]CustomMetricTarget, yyrl1826) + yyv1761 = make([]CustomMetricTarget, yyrl1761) } - yyc1826 = true - yyrr1826 = len(yyv1826) - if yyrg1826 { - copy(yyv1826, yyv21826) + yyc1761 = true + yyrr1761 = len(yyv1761) + if yyrg1761 { + copy(yyv1761, yyv21761) } - } else if yyl1826 != len(yyv1826) { - yyv1826 = yyv1826[:yyl1826] - yyc1826 = true + } else if yyl1761 != len(yyv1761) { + yyv1761 = yyv1761[:yyl1761] + yyc1761 = true } - yyj1826 := 0 - for ; yyj1826 < yyrr1826; yyj1826++ { - yyh1826.ElemContainerState(yyj1826) + yyj1761 := 0 + for ; yyj1761 < yyrr1761; yyj1761++ { + yyh1761.ElemContainerState(yyj1761) if r.TryDecodeAsNil() { - yyv1826[yyj1826] = CustomMetricTarget{} + yyv1761[yyj1761] = CustomMetricTarget{} } else { - yyv1827 := &yyv1826[yyj1826] - yyv1827.CodecDecodeSelf(d) + yyv1762 := &yyv1761[yyj1761] + yyv1762.CodecDecodeSelf(d) } } - if yyrt1826 { - for ; yyj1826 < yyl1826; yyj1826++ { - yyv1826 = append(yyv1826, CustomMetricTarget{}) - yyh1826.ElemContainerState(yyj1826) + if yyrt1761 { + for ; yyj1761 < yyl1761; yyj1761++ { + yyv1761 = append(yyv1761, CustomMetricTarget{}) + yyh1761.ElemContainerState(yyj1761) if r.TryDecodeAsNil() { - yyv1826[yyj1826] = CustomMetricTarget{} + yyv1761[yyj1761] = CustomMetricTarget{} } else { - yyv1828 := &yyv1826[yyj1826] - yyv1828.CodecDecodeSelf(d) + yyv1763 := &yyv1761[yyj1761] + yyv1763.CodecDecodeSelf(d) } } } } else { - yyj1826 := 0 - for ; !r.CheckBreak(); yyj1826++ { + yyj1761 := 0 + for ; !r.CheckBreak(); yyj1761++ { - if yyj1826 >= len(yyv1826) { - yyv1826 = append(yyv1826, CustomMetricTarget{}) // var yyz1826 CustomMetricTarget - yyc1826 = true + if yyj1761 >= len(yyv1761) { + yyv1761 = append(yyv1761, CustomMetricTarget{}) // var yyz1761 CustomMetricTarget + yyc1761 = true } - yyh1826.ElemContainerState(yyj1826) - if yyj1826 < len(yyv1826) { + yyh1761.ElemContainerState(yyj1761) + if yyj1761 < len(yyv1761) { if r.TryDecodeAsNil() { - yyv1826[yyj1826] = CustomMetricTarget{} + yyv1761[yyj1761] = CustomMetricTarget{} } else { - yyv1829 := &yyv1826[yyj1826] - yyv1829.CodecDecodeSelf(d) + yyv1764 := &yyv1761[yyj1761] + yyv1764.CodecDecodeSelf(d) } } else { @@ -21862,17 +21150,17 @@ func (x codecSelfer1234) decSliceCustomMetricTarget(v *[]CustomMetricTarget, d * } } - if yyj1826 < len(yyv1826) { - yyv1826 = yyv1826[:yyj1826] - yyc1826 = true - } else if yyj1826 == 0 && yyv1826 == nil { - yyv1826 = []CustomMetricTarget{} - yyc1826 = true + if yyj1761 < len(yyv1761) { + yyv1761 = yyv1761[:yyj1761] + yyc1761 = true + } else if yyj1761 == 0 && yyv1761 == nil { + yyv1761 = []CustomMetricTarget{} + yyc1761 = true } } - yyh1826.End() - if yyc1826 { - *v = yyv1826 + yyh1761.End() + if yyc1761 { + *v = yyv1761 } } @@ -21881,10 +21169,10 @@ func (x codecSelfer1234) encSliceCustomMetricCurrentStatus(v []CustomMetricCurre z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1830 := range v { + for _, yyv1765 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1831 := &yyv1830 - yy1831.CodecEncodeSelf(e) + yy1766 := &yyv1765 + yy1766.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -21894,83 +21182,83 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1832 := *v - yyh1832, yyl1832 := z.DecSliceHelperStart() - var yyc1832 bool - if yyl1832 == 0 { - if yyv1832 == nil { - yyv1832 = []CustomMetricCurrentStatus{} - yyc1832 = true - } else if len(yyv1832) != 
0 { - yyv1832 = yyv1832[:0] - yyc1832 = true + yyv1767 := *v + yyh1767, yyl1767 := z.DecSliceHelperStart() + var yyc1767 bool + if yyl1767 == 0 { + if yyv1767 == nil { + yyv1767 = []CustomMetricCurrentStatus{} + yyc1767 = true + } else if len(yyv1767) != 0 { + yyv1767 = yyv1767[:0] + yyc1767 = true } - } else if yyl1832 > 0 { - var yyrr1832, yyrl1832 int - var yyrt1832 bool - if yyl1832 > cap(yyv1832) { + } else if yyl1767 > 0 { + var yyrr1767, yyrl1767 int + var yyrt1767 bool + if yyl1767 > cap(yyv1767) { - yyrg1832 := len(yyv1832) > 0 - yyv21832 := yyv1832 - yyrl1832, yyrt1832 = z.DecInferLen(yyl1832, z.DecBasicHandle().MaxInitLen, 72) - if yyrt1832 { - if yyrl1832 <= cap(yyv1832) { - yyv1832 = yyv1832[:yyrl1832] + yyrg1767 := len(yyv1767) > 0 + yyv21767 := yyv1767 + yyrl1767, yyrt1767 = z.DecInferLen(yyl1767, z.DecBasicHandle().MaxInitLen, 72) + if yyrt1767 { + if yyrl1767 <= cap(yyv1767) { + yyv1767 = yyv1767[:yyrl1767] } else { - yyv1832 = make([]CustomMetricCurrentStatus, yyrl1832) + yyv1767 = make([]CustomMetricCurrentStatus, yyrl1767) } } else { - yyv1832 = make([]CustomMetricCurrentStatus, yyrl1832) + yyv1767 = make([]CustomMetricCurrentStatus, yyrl1767) } - yyc1832 = true - yyrr1832 = len(yyv1832) - if yyrg1832 { - copy(yyv1832, yyv21832) + yyc1767 = true + yyrr1767 = len(yyv1767) + if yyrg1767 { + copy(yyv1767, yyv21767) } - } else if yyl1832 != len(yyv1832) { - yyv1832 = yyv1832[:yyl1832] - yyc1832 = true + } else if yyl1767 != len(yyv1767) { + yyv1767 = yyv1767[:yyl1767] + yyc1767 = true } - yyj1832 := 0 - for ; yyj1832 < yyrr1832; yyj1832++ { - yyh1832.ElemContainerState(yyj1832) + yyj1767 := 0 + for ; yyj1767 < yyrr1767; yyj1767++ { + yyh1767.ElemContainerState(yyj1767) if r.TryDecodeAsNil() { - yyv1832[yyj1832] = CustomMetricCurrentStatus{} + yyv1767[yyj1767] = CustomMetricCurrentStatus{} } else { - yyv1833 := &yyv1832[yyj1832] - yyv1833.CodecDecodeSelf(d) + yyv1768 := &yyv1767[yyj1767] + yyv1768.CodecDecodeSelf(d) } } - if yyrt1832 { - for ; yyj1832 < yyl1832; yyj1832++ { - yyv1832 = append(yyv1832, CustomMetricCurrentStatus{}) - yyh1832.ElemContainerState(yyj1832) + if yyrt1767 { + for ; yyj1767 < yyl1767; yyj1767++ { + yyv1767 = append(yyv1767, CustomMetricCurrentStatus{}) + yyh1767.ElemContainerState(yyj1767) if r.TryDecodeAsNil() { - yyv1832[yyj1832] = CustomMetricCurrentStatus{} + yyv1767[yyj1767] = CustomMetricCurrentStatus{} } else { - yyv1834 := &yyv1832[yyj1832] - yyv1834.CodecDecodeSelf(d) + yyv1769 := &yyv1767[yyj1767] + yyv1769.CodecDecodeSelf(d) } } } } else { - yyj1832 := 0 - for ; !r.CheckBreak(); yyj1832++ { + yyj1767 := 0 + for ; !r.CheckBreak(); yyj1767++ { - if yyj1832 >= len(yyv1832) { - yyv1832 = append(yyv1832, CustomMetricCurrentStatus{}) // var yyz1832 CustomMetricCurrentStatus - yyc1832 = true + if yyj1767 >= len(yyv1767) { + yyv1767 = append(yyv1767, CustomMetricCurrentStatus{}) // var yyz1767 CustomMetricCurrentStatus + yyc1767 = true } - yyh1832.ElemContainerState(yyj1832) - if yyj1832 < len(yyv1832) { + yyh1767.ElemContainerState(yyj1767) + if yyj1767 < len(yyv1767) { if r.TryDecodeAsNil() { - yyv1832[yyj1832] = CustomMetricCurrentStatus{} + yyv1767[yyj1767] = CustomMetricCurrentStatus{} } else { - yyv1835 := &yyv1832[yyj1832] - yyv1835.CodecDecodeSelf(d) + yyv1770 := &yyv1767[yyj1767] + yyv1770.CodecDecodeSelf(d) } } else { @@ -21978,17 +21266,17 @@ func (x codecSelfer1234) decSliceCustomMetricCurrentStatus(v *[]CustomMetricCurr } } - if yyj1832 < len(yyv1832) { - yyv1832 = yyv1832[:yyj1832] - yyc1832 = true - } else if yyj1832 == 0 && yyv1832 
== nil { - yyv1832 = []CustomMetricCurrentStatus{} - yyc1832 = true + if yyj1767 < len(yyv1767) { + yyv1767 = yyv1767[:yyj1767] + yyc1767 = true + } else if yyj1767 == 0 && yyv1767 == nil { + yyv1767 = []CustomMetricCurrentStatus{} + yyc1767 = true } } - yyh1832.End() - if yyc1832 { - *v = yyv1832 + yyh1767.End() + if yyc1767 { + *v = yyv1767 } } @@ -21997,10 +21285,10 @@ func (x codecSelfer1234) encSliceHorizontalPodAutoscaler(v []HorizontalPodAutosc z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1836 := range v { + for _, yyv1771 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1837 := &yyv1836 - yy1837.CodecEncodeSelf(e) + yy1772 := &yyv1771 + yy1772.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22010,83 +21298,83 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1838 := *v - yyh1838, yyl1838 := z.DecSliceHelperStart() - var yyc1838 bool - if yyl1838 == 0 { - if yyv1838 == nil { - yyv1838 = []HorizontalPodAutoscaler{} - yyc1838 = true - } else if len(yyv1838) != 0 { - yyv1838 = yyv1838[:0] - yyc1838 = true + yyv1773 := *v + yyh1773, yyl1773 := z.DecSliceHelperStart() + var yyc1773 bool + if yyl1773 == 0 { + if yyv1773 == nil { + yyv1773 = []HorizontalPodAutoscaler{} + yyc1773 = true + } else if len(yyv1773) != 0 { + yyv1773 = yyv1773[:0] + yyc1773 = true } - } else if yyl1838 > 0 { - var yyrr1838, yyrl1838 int - var yyrt1838 bool - if yyl1838 > cap(yyv1838) { + } else if yyl1773 > 0 { + var yyrr1773, yyrl1773 int + var yyrt1773 bool + if yyl1773 > cap(yyv1773) { - yyrg1838 := len(yyv1838) > 0 - yyv21838 := yyv1838 - yyrl1838, yyrt1838 = z.DecInferLen(yyl1838, z.DecBasicHandle().MaxInitLen, 376) - if yyrt1838 { - if yyrl1838 <= cap(yyv1838) { - yyv1838 = yyv1838[:yyrl1838] + yyrg1773 := len(yyv1773) > 0 + yyv21773 := yyv1773 + yyrl1773, yyrt1773 = z.DecInferLen(yyl1773, z.DecBasicHandle().MaxInitLen, 376) + if yyrt1773 { + if yyrl1773 <= cap(yyv1773) { + yyv1773 = yyv1773[:yyrl1773] } else { - yyv1838 = make([]HorizontalPodAutoscaler, yyrl1838) + yyv1773 = make([]HorizontalPodAutoscaler, yyrl1773) } } else { - yyv1838 = make([]HorizontalPodAutoscaler, yyrl1838) + yyv1773 = make([]HorizontalPodAutoscaler, yyrl1773) } - yyc1838 = true - yyrr1838 = len(yyv1838) - if yyrg1838 { - copy(yyv1838, yyv21838) + yyc1773 = true + yyrr1773 = len(yyv1773) + if yyrg1773 { + copy(yyv1773, yyv21773) } - } else if yyl1838 != len(yyv1838) { - yyv1838 = yyv1838[:yyl1838] - yyc1838 = true + } else if yyl1773 != len(yyv1773) { + yyv1773 = yyv1773[:yyl1773] + yyc1773 = true } - yyj1838 := 0 - for ; yyj1838 < yyrr1838; yyj1838++ { - yyh1838.ElemContainerState(yyj1838) + yyj1773 := 0 + for ; yyj1773 < yyrr1773; yyj1773++ { + yyh1773.ElemContainerState(yyj1773) if r.TryDecodeAsNil() { - yyv1838[yyj1838] = HorizontalPodAutoscaler{} + yyv1773[yyj1773] = HorizontalPodAutoscaler{} } else { - yyv1839 := &yyv1838[yyj1838] - yyv1839.CodecDecodeSelf(d) + yyv1774 := &yyv1773[yyj1773] + yyv1774.CodecDecodeSelf(d) } } - if yyrt1838 { - for ; yyj1838 < yyl1838; yyj1838++ { - yyv1838 = append(yyv1838, HorizontalPodAutoscaler{}) - yyh1838.ElemContainerState(yyj1838) + if yyrt1773 { + for ; yyj1773 < yyl1773; yyj1773++ { + yyv1773 = append(yyv1773, HorizontalPodAutoscaler{}) + yyh1773.ElemContainerState(yyj1773) if r.TryDecodeAsNil() { - yyv1838[yyj1838] = HorizontalPodAutoscaler{} + yyv1773[yyj1773] = 
HorizontalPodAutoscaler{} } else { - yyv1840 := &yyv1838[yyj1838] - yyv1840.CodecDecodeSelf(d) + yyv1775 := &yyv1773[yyj1773] + yyv1775.CodecDecodeSelf(d) } } } } else { - yyj1838 := 0 - for ; !r.CheckBreak(); yyj1838++ { + yyj1773 := 0 + for ; !r.CheckBreak(); yyj1773++ { - if yyj1838 >= len(yyv1838) { - yyv1838 = append(yyv1838, HorizontalPodAutoscaler{}) // var yyz1838 HorizontalPodAutoscaler - yyc1838 = true + if yyj1773 >= len(yyv1773) { + yyv1773 = append(yyv1773, HorizontalPodAutoscaler{}) // var yyz1773 HorizontalPodAutoscaler + yyc1773 = true } - yyh1838.ElemContainerState(yyj1838) - if yyj1838 < len(yyv1838) { + yyh1773.ElemContainerState(yyj1773) + if yyj1773 < len(yyv1773) { if r.TryDecodeAsNil() { - yyv1838[yyj1838] = HorizontalPodAutoscaler{} + yyv1773[yyj1773] = HorizontalPodAutoscaler{} } else { - yyv1841 := &yyv1838[yyj1838] - yyv1841.CodecDecodeSelf(d) + yyv1776 := &yyv1773[yyj1773] + yyv1776.CodecDecodeSelf(d) } } else { @@ -22094,17 +21382,17 @@ func (x codecSelfer1234) decSliceHorizontalPodAutoscaler(v *[]HorizontalPodAutos } } - if yyj1838 < len(yyv1838) { - yyv1838 = yyv1838[:yyj1838] - yyc1838 = true - } else if yyj1838 == 0 && yyv1838 == nil { - yyv1838 = []HorizontalPodAutoscaler{} - yyc1838 = true + if yyj1773 < len(yyv1773) { + yyv1773 = yyv1773[:yyj1773] + yyc1773 = true + } else if yyj1773 == 0 && yyv1773 == nil { + yyv1773 = []HorizontalPodAutoscaler{} + yyc1773 = true } } - yyh1838.End() - if yyc1838 { - *v = yyv1838 + yyh1773.End() + if yyc1773 { + *v = yyv1773 } } @@ -22113,10 +21401,10 @@ func (x codecSelfer1234) encSliceAPIVersion(v []APIVersion, e *codec1978.Encoder z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1842 := range v { + for _, yyv1777 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1843 := &yyv1842 - yy1843.CodecEncodeSelf(e) + yy1778 := &yyv1777 + yy1778.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22126,83 +21414,83 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1844 := *v - yyh1844, yyl1844 := z.DecSliceHelperStart() - var yyc1844 bool - if yyl1844 == 0 { - if yyv1844 == nil { - yyv1844 = []APIVersion{} - yyc1844 = true - } else if len(yyv1844) != 0 { - yyv1844 = yyv1844[:0] - yyc1844 = true + yyv1779 := *v + yyh1779, yyl1779 := z.DecSliceHelperStart() + var yyc1779 bool + if yyl1779 == 0 { + if yyv1779 == nil { + yyv1779 = []APIVersion{} + yyc1779 = true + } else if len(yyv1779) != 0 { + yyv1779 = yyv1779[:0] + yyc1779 = true } - } else if yyl1844 > 0 { - var yyrr1844, yyrl1844 int - var yyrt1844 bool - if yyl1844 > cap(yyv1844) { + } else if yyl1779 > 0 { + var yyrr1779, yyrl1779 int + var yyrt1779 bool + if yyl1779 > cap(yyv1779) { - yyrg1844 := len(yyv1844) > 0 - yyv21844 := yyv1844 - yyrl1844, yyrt1844 = z.DecInferLen(yyl1844, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1844 { - if yyrl1844 <= cap(yyv1844) { - yyv1844 = yyv1844[:yyrl1844] + yyrg1779 := len(yyv1779) > 0 + yyv21779 := yyv1779 + yyrl1779, yyrt1779 = z.DecInferLen(yyl1779, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1779 { + if yyrl1779 <= cap(yyv1779) { + yyv1779 = yyv1779[:yyrl1779] } else { - yyv1844 = make([]APIVersion, yyrl1844) + yyv1779 = make([]APIVersion, yyrl1779) } } else { - yyv1844 = make([]APIVersion, yyrl1844) + yyv1779 = make([]APIVersion, yyrl1779) } - yyc1844 = true - yyrr1844 = len(yyv1844) - if yyrg1844 { - copy(yyv1844, yyv21844) + 
yyc1779 = true + yyrr1779 = len(yyv1779) + if yyrg1779 { + copy(yyv1779, yyv21779) } - } else if yyl1844 != len(yyv1844) { - yyv1844 = yyv1844[:yyl1844] - yyc1844 = true + } else if yyl1779 != len(yyv1779) { + yyv1779 = yyv1779[:yyl1779] + yyc1779 = true } - yyj1844 := 0 - for ; yyj1844 < yyrr1844; yyj1844++ { - yyh1844.ElemContainerState(yyj1844) + yyj1779 := 0 + for ; yyj1779 < yyrr1779; yyj1779++ { + yyh1779.ElemContainerState(yyj1779) if r.TryDecodeAsNil() { - yyv1844[yyj1844] = APIVersion{} + yyv1779[yyj1779] = APIVersion{} } else { - yyv1845 := &yyv1844[yyj1844] - yyv1845.CodecDecodeSelf(d) + yyv1780 := &yyv1779[yyj1779] + yyv1780.CodecDecodeSelf(d) } } - if yyrt1844 { - for ; yyj1844 < yyl1844; yyj1844++ { - yyv1844 = append(yyv1844, APIVersion{}) - yyh1844.ElemContainerState(yyj1844) + if yyrt1779 { + for ; yyj1779 < yyl1779; yyj1779++ { + yyv1779 = append(yyv1779, APIVersion{}) + yyh1779.ElemContainerState(yyj1779) if r.TryDecodeAsNil() { - yyv1844[yyj1844] = APIVersion{} + yyv1779[yyj1779] = APIVersion{} } else { - yyv1846 := &yyv1844[yyj1844] - yyv1846.CodecDecodeSelf(d) + yyv1781 := &yyv1779[yyj1779] + yyv1781.CodecDecodeSelf(d) } } } } else { - yyj1844 := 0 - for ; !r.CheckBreak(); yyj1844++ { + yyj1779 := 0 + for ; !r.CheckBreak(); yyj1779++ { - if yyj1844 >= len(yyv1844) { - yyv1844 = append(yyv1844, APIVersion{}) // var yyz1844 APIVersion - yyc1844 = true + if yyj1779 >= len(yyv1779) { + yyv1779 = append(yyv1779, APIVersion{}) // var yyz1779 APIVersion + yyc1779 = true } - yyh1844.ElemContainerState(yyj1844) - if yyj1844 < len(yyv1844) { + yyh1779.ElemContainerState(yyj1779) + if yyj1779 < len(yyv1779) { if r.TryDecodeAsNil() { - yyv1844[yyj1844] = APIVersion{} + yyv1779[yyj1779] = APIVersion{} } else { - yyv1847 := &yyv1844[yyj1844] - yyv1847.CodecDecodeSelf(d) + yyv1782 := &yyv1779[yyj1779] + yyv1782.CodecDecodeSelf(d) } } else { @@ -22210,17 +21498,17 @@ func (x codecSelfer1234) decSliceAPIVersion(v *[]APIVersion, d *codec1978.Decode } } - if yyj1844 < len(yyv1844) { - yyv1844 = yyv1844[:yyj1844] - yyc1844 = true - } else if yyj1844 == 0 && yyv1844 == nil { - yyv1844 = []APIVersion{} - yyc1844 = true + if yyj1779 < len(yyv1779) { + yyv1779 = yyv1779[:yyj1779] + yyc1779 = true + } else if yyj1779 == 0 && yyv1779 == nil { + yyv1779 = []APIVersion{} + yyc1779 = true } } - yyh1844.End() - if yyc1844 { - *v = yyv1844 + yyh1779.End() + if yyc1779 { + *v = yyv1779 } } @@ -22229,10 +21517,10 @@ func (x codecSelfer1234) encSliceThirdPartyResource(v []ThirdPartyResource, e *c z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1848 := range v { + for _, yyv1783 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1849 := &yyv1848 - yy1849.CodecEncodeSelf(e) + yy1784 := &yyv1783 + yy1784.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22242,83 +21530,83 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1850 := *v - yyh1850, yyl1850 := z.DecSliceHelperStart() - var yyc1850 bool - if yyl1850 == 0 { - if yyv1850 == nil { - yyv1850 = []ThirdPartyResource{} - yyc1850 = true - } else if len(yyv1850) != 0 { - yyv1850 = yyv1850[:0] - yyc1850 = true + yyv1785 := *v + yyh1785, yyl1785 := z.DecSliceHelperStart() + var yyc1785 bool + if yyl1785 == 0 { + if yyv1785 == nil { + yyv1785 = []ThirdPartyResource{} + yyc1785 = true + } else if len(yyv1785) != 0 { + yyv1785 = yyv1785[:0] + yyc1785 = true } 
- } else if yyl1850 > 0 { - var yyrr1850, yyrl1850 int - var yyrt1850 bool - if yyl1850 > cap(yyv1850) { + } else if yyl1785 > 0 { + var yyrr1785, yyrl1785 int + var yyrt1785 bool + if yyl1785 > cap(yyv1785) { - yyrg1850 := len(yyv1850) > 0 - yyv21850 := yyv1850 - yyrl1850, yyrt1850 = z.DecInferLen(yyl1850, z.DecBasicHandle().MaxInitLen, 296) - if yyrt1850 { - if yyrl1850 <= cap(yyv1850) { - yyv1850 = yyv1850[:yyrl1850] + yyrg1785 := len(yyv1785) > 0 + yyv21785 := yyv1785 + yyrl1785, yyrt1785 = z.DecInferLen(yyl1785, z.DecBasicHandle().MaxInitLen, 296) + if yyrt1785 { + if yyrl1785 <= cap(yyv1785) { + yyv1785 = yyv1785[:yyrl1785] } else { - yyv1850 = make([]ThirdPartyResource, yyrl1850) + yyv1785 = make([]ThirdPartyResource, yyrl1785) } } else { - yyv1850 = make([]ThirdPartyResource, yyrl1850) + yyv1785 = make([]ThirdPartyResource, yyrl1785) } - yyc1850 = true - yyrr1850 = len(yyv1850) - if yyrg1850 { - copy(yyv1850, yyv21850) + yyc1785 = true + yyrr1785 = len(yyv1785) + if yyrg1785 { + copy(yyv1785, yyv21785) } - } else if yyl1850 != len(yyv1850) { - yyv1850 = yyv1850[:yyl1850] - yyc1850 = true + } else if yyl1785 != len(yyv1785) { + yyv1785 = yyv1785[:yyl1785] + yyc1785 = true } - yyj1850 := 0 - for ; yyj1850 < yyrr1850; yyj1850++ { - yyh1850.ElemContainerState(yyj1850) + yyj1785 := 0 + for ; yyj1785 < yyrr1785; yyj1785++ { + yyh1785.ElemContainerState(yyj1785) if r.TryDecodeAsNil() { - yyv1850[yyj1850] = ThirdPartyResource{} + yyv1785[yyj1785] = ThirdPartyResource{} } else { - yyv1851 := &yyv1850[yyj1850] - yyv1851.CodecDecodeSelf(d) + yyv1786 := &yyv1785[yyj1785] + yyv1786.CodecDecodeSelf(d) } } - if yyrt1850 { - for ; yyj1850 < yyl1850; yyj1850++ { - yyv1850 = append(yyv1850, ThirdPartyResource{}) - yyh1850.ElemContainerState(yyj1850) + if yyrt1785 { + for ; yyj1785 < yyl1785; yyj1785++ { + yyv1785 = append(yyv1785, ThirdPartyResource{}) + yyh1785.ElemContainerState(yyj1785) if r.TryDecodeAsNil() { - yyv1850[yyj1850] = ThirdPartyResource{} + yyv1785[yyj1785] = ThirdPartyResource{} } else { - yyv1852 := &yyv1850[yyj1850] - yyv1852.CodecDecodeSelf(d) + yyv1787 := &yyv1785[yyj1785] + yyv1787.CodecDecodeSelf(d) } } } } else { - yyj1850 := 0 - for ; !r.CheckBreak(); yyj1850++ { + yyj1785 := 0 + for ; !r.CheckBreak(); yyj1785++ { - if yyj1850 >= len(yyv1850) { - yyv1850 = append(yyv1850, ThirdPartyResource{}) // var yyz1850 ThirdPartyResource - yyc1850 = true + if yyj1785 >= len(yyv1785) { + yyv1785 = append(yyv1785, ThirdPartyResource{}) // var yyz1785 ThirdPartyResource + yyc1785 = true } - yyh1850.ElemContainerState(yyj1850) - if yyj1850 < len(yyv1850) { + yyh1785.ElemContainerState(yyj1785) + if yyj1785 < len(yyv1785) { if r.TryDecodeAsNil() { - yyv1850[yyj1850] = ThirdPartyResource{} + yyv1785[yyj1785] = ThirdPartyResource{} } else { - yyv1853 := &yyv1850[yyj1850] - yyv1853.CodecDecodeSelf(d) + yyv1788 := &yyv1785[yyj1785] + yyv1788.CodecDecodeSelf(d) } } else { @@ -22326,17 +21614,17 @@ func (x codecSelfer1234) decSliceThirdPartyResource(v *[]ThirdPartyResource, d * } } - if yyj1850 < len(yyv1850) { - yyv1850 = yyv1850[:yyj1850] - yyc1850 = true - } else if yyj1850 == 0 && yyv1850 == nil { - yyv1850 = []ThirdPartyResource{} - yyc1850 = true + if yyj1785 < len(yyv1785) { + yyv1785 = yyv1785[:yyj1785] + yyc1785 = true + } else if yyj1785 == 0 && yyv1785 == nil { + yyv1785 = []ThirdPartyResource{} + yyc1785 = true } } - yyh1850.End() - if yyc1850 { - *v = yyv1850 + yyh1785.End() + if yyc1785 { + *v = yyv1785 } } @@ -22345,10 +21633,10 @@ func (x codecSelfer1234) encSliceDeployment(v 
[]Deployment, e *codec1978.Encoder z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1854 := range v { + for _, yyv1789 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1855 := &yyv1854 - yy1855.CodecEncodeSelf(e) + yy1790 := &yyv1789 + yy1790.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22358,83 +21646,83 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1856 := *v - yyh1856, yyl1856 := z.DecSliceHelperStart() - var yyc1856 bool - if yyl1856 == 0 { - if yyv1856 == nil { - yyv1856 = []Deployment{} - yyc1856 = true - } else if len(yyv1856) != 0 { - yyv1856 = yyv1856[:0] - yyc1856 = true + yyv1791 := *v + yyh1791, yyl1791 := z.DecSliceHelperStart() + var yyc1791 bool + if yyl1791 == 0 { + if yyv1791 == nil { + yyv1791 = []Deployment{} + yyc1791 = true + } else if len(yyv1791) != 0 { + yyv1791 = yyv1791[:0] + yyc1791 = true } - } else if yyl1856 > 0 { - var yyrr1856, yyrl1856 int - var yyrt1856 bool - if yyl1856 > cap(yyv1856) { + } else if yyl1791 > 0 { + var yyrr1791, yyrl1791 int + var yyrt1791 bool + if yyl1791 > cap(yyv1791) { - yyrg1856 := len(yyv1856) > 0 - yyv21856 := yyv1856 - yyrl1856, yyrt1856 = z.DecInferLen(yyl1856, z.DecBasicHandle().MaxInitLen, 824) - if yyrt1856 { - if yyrl1856 <= cap(yyv1856) { - yyv1856 = yyv1856[:yyrl1856] + yyrg1791 := len(yyv1791) > 0 + yyv21791 := yyv1791 + yyrl1791, yyrt1791 = z.DecInferLen(yyl1791, z.DecBasicHandle().MaxInitLen, 824) + if yyrt1791 { + if yyrl1791 <= cap(yyv1791) { + yyv1791 = yyv1791[:yyrl1791] } else { - yyv1856 = make([]Deployment, yyrl1856) + yyv1791 = make([]Deployment, yyrl1791) } } else { - yyv1856 = make([]Deployment, yyrl1856) + yyv1791 = make([]Deployment, yyrl1791) } - yyc1856 = true - yyrr1856 = len(yyv1856) - if yyrg1856 { - copy(yyv1856, yyv21856) + yyc1791 = true + yyrr1791 = len(yyv1791) + if yyrg1791 { + copy(yyv1791, yyv21791) } - } else if yyl1856 != len(yyv1856) { - yyv1856 = yyv1856[:yyl1856] - yyc1856 = true + } else if yyl1791 != len(yyv1791) { + yyv1791 = yyv1791[:yyl1791] + yyc1791 = true } - yyj1856 := 0 - for ; yyj1856 < yyrr1856; yyj1856++ { - yyh1856.ElemContainerState(yyj1856) + yyj1791 := 0 + for ; yyj1791 < yyrr1791; yyj1791++ { + yyh1791.ElemContainerState(yyj1791) if r.TryDecodeAsNil() { - yyv1856[yyj1856] = Deployment{} + yyv1791[yyj1791] = Deployment{} } else { - yyv1857 := &yyv1856[yyj1856] - yyv1857.CodecDecodeSelf(d) + yyv1792 := &yyv1791[yyj1791] + yyv1792.CodecDecodeSelf(d) } } - if yyrt1856 { - for ; yyj1856 < yyl1856; yyj1856++ { - yyv1856 = append(yyv1856, Deployment{}) - yyh1856.ElemContainerState(yyj1856) + if yyrt1791 { + for ; yyj1791 < yyl1791; yyj1791++ { + yyv1791 = append(yyv1791, Deployment{}) + yyh1791.ElemContainerState(yyj1791) if r.TryDecodeAsNil() { - yyv1856[yyj1856] = Deployment{} + yyv1791[yyj1791] = Deployment{} } else { - yyv1858 := &yyv1856[yyj1856] - yyv1858.CodecDecodeSelf(d) + yyv1793 := &yyv1791[yyj1791] + yyv1793.CodecDecodeSelf(d) } } } } else { - yyj1856 := 0 - for ; !r.CheckBreak(); yyj1856++ { + yyj1791 := 0 + for ; !r.CheckBreak(); yyj1791++ { - if yyj1856 >= len(yyv1856) { - yyv1856 = append(yyv1856, Deployment{}) // var yyz1856 Deployment - yyc1856 = true + if yyj1791 >= len(yyv1791) { + yyv1791 = append(yyv1791, Deployment{}) // var yyz1791 Deployment + yyc1791 = true } - yyh1856.ElemContainerState(yyj1856) - if yyj1856 < len(yyv1856) { + 
yyh1791.ElemContainerState(yyj1791) + if yyj1791 < len(yyv1791) { if r.TryDecodeAsNil() { - yyv1856[yyj1856] = Deployment{} + yyv1791[yyj1791] = Deployment{} } else { - yyv1859 := &yyv1856[yyj1856] - yyv1859.CodecDecodeSelf(d) + yyv1794 := &yyv1791[yyj1791] + yyv1794.CodecDecodeSelf(d) } } else { @@ -22442,17 +21730,17 @@ func (x codecSelfer1234) decSliceDeployment(v *[]Deployment, d *codec1978.Decode } } - if yyj1856 < len(yyv1856) { - yyv1856 = yyv1856[:yyj1856] - yyc1856 = true - } else if yyj1856 == 0 && yyv1856 == nil { - yyv1856 = []Deployment{} - yyc1856 = true + if yyj1791 < len(yyv1791) { + yyv1791 = yyv1791[:yyj1791] + yyc1791 = true + } else if yyj1791 == 0 && yyv1791 == nil { + yyv1791 = []Deployment{} + yyc1791 = true } } - yyh1856.End() - if yyc1856 { - *v = yyv1856 + yyh1791.End() + if yyc1791 { + *v = yyv1791 } } @@ -22461,10 +21749,10 @@ func (x codecSelfer1234) encSliceDaemonSet(v []DaemonSet, e *codec1978.Encoder) z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1860 := range v { + for _, yyv1795 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1861 := &yyv1860 - yy1861.CodecEncodeSelf(e) + yy1796 := &yyv1795 + yy1796.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22474,83 +21762,83 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1862 := *v - yyh1862, yyl1862 := z.DecSliceHelperStart() - var yyc1862 bool - if yyl1862 == 0 { - if yyv1862 == nil { - yyv1862 = []DaemonSet{} - yyc1862 = true - } else if len(yyv1862) != 0 { - yyv1862 = yyv1862[:0] - yyc1862 = true + yyv1797 := *v + yyh1797, yyl1797 := z.DecSliceHelperStart() + var yyc1797 bool + if yyl1797 == 0 { + if yyv1797 == nil { + yyv1797 = []DaemonSet{} + yyc1797 = true + } else if len(yyv1797) != 0 { + yyv1797 = yyv1797[:0] + yyc1797 = true } - } else if yyl1862 > 0 { - var yyrr1862, yyrl1862 int - var yyrt1862 bool - if yyl1862 > cap(yyv1862) { + } else if yyl1797 > 0 { + var yyrr1797, yyrl1797 int + var yyrt1797 bool + if yyl1797 > cap(yyv1797) { - yyrg1862 := len(yyv1862) > 0 - yyv21862 := yyv1862 - yyrl1862, yyrt1862 = z.DecInferLen(yyl1862, z.DecBasicHandle().MaxInitLen, 752) - if yyrt1862 { - if yyrl1862 <= cap(yyv1862) { - yyv1862 = yyv1862[:yyrl1862] + yyrg1797 := len(yyv1797) > 0 + yyv21797 := yyv1797 + yyrl1797, yyrt1797 = z.DecInferLen(yyl1797, z.DecBasicHandle().MaxInitLen, 752) + if yyrt1797 { + if yyrl1797 <= cap(yyv1797) { + yyv1797 = yyv1797[:yyrl1797] } else { - yyv1862 = make([]DaemonSet, yyrl1862) + yyv1797 = make([]DaemonSet, yyrl1797) } } else { - yyv1862 = make([]DaemonSet, yyrl1862) + yyv1797 = make([]DaemonSet, yyrl1797) } - yyc1862 = true - yyrr1862 = len(yyv1862) - if yyrg1862 { - copy(yyv1862, yyv21862) + yyc1797 = true + yyrr1797 = len(yyv1797) + if yyrg1797 { + copy(yyv1797, yyv21797) } - } else if yyl1862 != len(yyv1862) { - yyv1862 = yyv1862[:yyl1862] - yyc1862 = true + } else if yyl1797 != len(yyv1797) { + yyv1797 = yyv1797[:yyl1797] + yyc1797 = true } - yyj1862 := 0 - for ; yyj1862 < yyrr1862; yyj1862++ { - yyh1862.ElemContainerState(yyj1862) + yyj1797 := 0 + for ; yyj1797 < yyrr1797; yyj1797++ { + yyh1797.ElemContainerState(yyj1797) if r.TryDecodeAsNil() { - yyv1862[yyj1862] = DaemonSet{} + yyv1797[yyj1797] = DaemonSet{} } else { - yyv1863 := &yyv1862[yyj1862] - yyv1863.CodecDecodeSelf(d) + yyv1798 := &yyv1797[yyj1797] + yyv1798.CodecDecodeSelf(d) } } - if yyrt1862 { - for ; 
yyj1862 < yyl1862; yyj1862++ { - yyv1862 = append(yyv1862, DaemonSet{}) - yyh1862.ElemContainerState(yyj1862) + if yyrt1797 { + for ; yyj1797 < yyl1797; yyj1797++ { + yyv1797 = append(yyv1797, DaemonSet{}) + yyh1797.ElemContainerState(yyj1797) if r.TryDecodeAsNil() { - yyv1862[yyj1862] = DaemonSet{} + yyv1797[yyj1797] = DaemonSet{} } else { - yyv1864 := &yyv1862[yyj1862] - yyv1864.CodecDecodeSelf(d) + yyv1799 := &yyv1797[yyj1797] + yyv1799.CodecDecodeSelf(d) } } } } else { - yyj1862 := 0 - for ; !r.CheckBreak(); yyj1862++ { + yyj1797 := 0 + for ; !r.CheckBreak(); yyj1797++ { - if yyj1862 >= len(yyv1862) { - yyv1862 = append(yyv1862, DaemonSet{}) // var yyz1862 DaemonSet - yyc1862 = true + if yyj1797 >= len(yyv1797) { + yyv1797 = append(yyv1797, DaemonSet{}) // var yyz1797 DaemonSet + yyc1797 = true } - yyh1862.ElemContainerState(yyj1862) - if yyj1862 < len(yyv1862) { + yyh1797.ElemContainerState(yyj1797) + if yyj1797 < len(yyv1797) { if r.TryDecodeAsNil() { - yyv1862[yyj1862] = DaemonSet{} + yyv1797[yyj1797] = DaemonSet{} } else { - yyv1865 := &yyv1862[yyj1862] - yyv1865.CodecDecodeSelf(d) + yyv1800 := &yyv1797[yyj1797] + yyv1800.CodecDecodeSelf(d) } } else { @@ -22558,17 +21846,17 @@ func (x codecSelfer1234) decSliceDaemonSet(v *[]DaemonSet, d *codec1978.Decoder) } } - if yyj1862 < len(yyv1862) { - yyv1862 = yyv1862[:yyj1862] - yyc1862 = true - } else if yyj1862 == 0 && yyv1862 == nil { - yyv1862 = []DaemonSet{} - yyc1862 = true + if yyj1797 < len(yyv1797) { + yyv1797 = yyv1797[:yyj1797] + yyc1797 = true + } else if yyj1797 == 0 && yyv1797 == nil { + yyv1797 = []DaemonSet{} + yyc1797 = true } } - yyh1862.End() - if yyc1862 { - *v = yyv1862 + yyh1797.End() + if yyc1797 { + *v = yyv1797 } } @@ -22577,10 +21865,10 @@ func (x codecSelfer1234) encSliceThirdPartyResourceData(v []ThirdPartyResourceDa z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1866 := range v { + for _, yyv1801 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1867 := &yyv1866 - yy1867.CodecEncodeSelf(e) + yy1802 := &yyv1801 + yy1802.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } @@ -22590,83 +21878,83 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1868 := *v - yyh1868, yyl1868 := z.DecSliceHelperStart() - var yyc1868 bool - if yyl1868 == 0 { - if yyv1868 == nil { - yyv1868 = []ThirdPartyResourceData{} - yyc1868 = true - } else if len(yyv1868) != 0 { - yyv1868 = yyv1868[:0] - yyc1868 = true + yyv1803 := *v + yyh1803, yyl1803 := z.DecSliceHelperStart() + var yyc1803 bool + if yyl1803 == 0 { + if yyv1803 == nil { + yyv1803 = []ThirdPartyResourceData{} + yyc1803 = true + } else if len(yyv1803) != 0 { + yyv1803 = yyv1803[:0] + yyc1803 = true } - } else if yyl1868 > 0 { - var yyrr1868, yyrl1868 int - var yyrt1868 bool - if yyl1868 > cap(yyv1868) { + } else if yyl1803 > 0 { + var yyrr1803, yyrl1803 int + var yyrt1803 bool + if yyl1803 > cap(yyv1803) { - yyrg1868 := len(yyv1868) > 0 - yyv21868 := yyv1868 - yyrl1868, yyrt1868 = z.DecInferLen(yyl1868, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1868 { - if yyrl1868 <= cap(yyv1868) { - yyv1868 = yyv1868[:yyrl1868] + yyrg1803 := len(yyv1803) > 0 + yyv21803 := yyv1803 + yyrl1803, yyrt1803 = z.DecInferLen(yyl1803, z.DecBasicHandle().MaxInitLen, 280) + if yyrt1803 { + if yyrl1803 <= cap(yyv1803) { + yyv1803 = yyv1803[:yyrl1803] } else { - yyv1868 = make([]ThirdPartyResourceData, 
yyrl1868) + yyv1803 = make([]ThirdPartyResourceData, yyrl1803) } } else { - yyv1868 = make([]ThirdPartyResourceData, yyrl1868) + yyv1803 = make([]ThirdPartyResourceData, yyrl1803) } - yyc1868 = true - yyrr1868 = len(yyv1868) - if yyrg1868 { - copy(yyv1868, yyv21868) + yyc1803 = true + yyrr1803 = len(yyv1803) + if yyrg1803 { + copy(yyv1803, yyv21803) } - } else if yyl1868 != len(yyv1868) { - yyv1868 = yyv1868[:yyl1868] - yyc1868 = true + } else if yyl1803 != len(yyv1803) { + yyv1803 = yyv1803[:yyl1803] + yyc1803 = true } - yyj1868 := 0 - for ; yyj1868 < yyrr1868; yyj1868++ { - yyh1868.ElemContainerState(yyj1868) + yyj1803 := 0 + for ; yyj1803 < yyrr1803; yyj1803++ { + yyh1803.ElemContainerState(yyj1803) if r.TryDecodeAsNil() { - yyv1868[yyj1868] = ThirdPartyResourceData{} + yyv1803[yyj1803] = ThirdPartyResourceData{} } else { - yyv1869 := &yyv1868[yyj1868] - yyv1869.CodecDecodeSelf(d) + yyv1804 := &yyv1803[yyj1803] + yyv1804.CodecDecodeSelf(d) } } - if yyrt1868 { - for ; yyj1868 < yyl1868; yyj1868++ { - yyv1868 = append(yyv1868, ThirdPartyResourceData{}) - yyh1868.ElemContainerState(yyj1868) + if yyrt1803 { + for ; yyj1803 < yyl1803; yyj1803++ { + yyv1803 = append(yyv1803, ThirdPartyResourceData{}) + yyh1803.ElemContainerState(yyj1803) if r.TryDecodeAsNil() { - yyv1868[yyj1868] = ThirdPartyResourceData{} + yyv1803[yyj1803] = ThirdPartyResourceData{} } else { - yyv1870 := &yyv1868[yyj1868] - yyv1870.CodecDecodeSelf(d) + yyv1805 := &yyv1803[yyj1803] + yyv1805.CodecDecodeSelf(d) } } } } else { - yyj1868 := 0 - for ; !r.CheckBreak(); yyj1868++ { + yyj1803 := 0 + for ; !r.CheckBreak(); yyj1803++ { - if yyj1868 >= len(yyv1868) { - yyv1868 = append(yyv1868, ThirdPartyResourceData{}) // var yyz1868 ThirdPartyResourceData - yyc1868 = true + if yyj1803 >= len(yyv1803) { + yyv1803 = append(yyv1803, ThirdPartyResourceData{}) // var yyz1803 ThirdPartyResourceData + yyc1803 = true } - yyh1868.ElemContainerState(yyj1868) - if yyj1868 < len(yyv1868) { + yyh1803.ElemContainerState(yyj1803) + if yyj1803 < len(yyv1803) { if r.TryDecodeAsNil() { - yyv1868[yyj1868] = ThirdPartyResourceData{} + yyv1803[yyj1803] = ThirdPartyResourceData{} } else { - yyv1871 := &yyv1868[yyj1868] - yyv1871.CodecDecodeSelf(d) + yyv1806 := &yyv1803[yyj1803] + yyv1806.CodecDecodeSelf(d) } } else { @@ -22674,17 +21962,17 @@ func (x codecSelfer1234) decSliceThirdPartyResourceData(v *[]ThirdPartyResourceD } } - if yyj1868 < len(yyv1868) { - yyv1868 = yyv1868[:yyj1868] - yyc1868 = true - } else if yyj1868 == 0 && yyv1868 == nil { - yyv1868 = []ThirdPartyResourceData{} - yyc1868 = true + if yyj1803 < len(yyv1803) { + yyv1803 = yyv1803[:yyj1803] + yyc1803 = true + } else if yyj1803 == 0 && yyv1803 == nil { + yyv1803 = []ThirdPartyResourceData{} + yyc1803 = true } } - yyh1868.End() - if yyc1868 { - *v = yyv1868 + yyh1803.End() + if yyc1803 { + *v = yyv1803 } } @@ -22693,212 +21981,96 @@ func (x codecSelfer1234) encSliceJob(v []Job, e *codec1978.Encoder) { z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1872 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1873 := &yyv1872 - yy1873.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv1874 := *v - yyh1874, yyl1874 := z.DecSliceHelperStart() - var yyc1874 bool - if yyl1874 == 0 { - if yyv1874 == nil { - yyv1874 = []Job{} - 
yyc1874 = true - } else if len(yyv1874) != 0 { - yyv1874 = yyv1874[:0] - yyc1874 = true - } - } else if yyl1874 > 0 { - var yyrr1874, yyrl1874 int - var yyrt1874 bool - if yyl1874 > cap(yyv1874) { - - yyrg1874 := len(yyv1874) > 0 - yyv21874 := yyv1874 - yyrl1874, yyrt1874 = z.DecInferLen(yyl1874, z.DecBasicHandle().MaxInitLen, 824) - if yyrt1874 { - if yyrl1874 <= cap(yyv1874) { - yyv1874 = yyv1874[:yyrl1874] - } else { - yyv1874 = make([]Job, yyrl1874) - } - } else { - yyv1874 = make([]Job, yyrl1874) - } - yyc1874 = true - yyrr1874 = len(yyv1874) - if yyrg1874 { - copy(yyv1874, yyv21874) - } - } else if yyl1874 != len(yyv1874) { - yyv1874 = yyv1874[:yyl1874] - yyc1874 = true - } - yyj1874 := 0 - for ; yyj1874 < yyrr1874; yyj1874++ { - yyh1874.ElemContainerState(yyj1874) - if r.TryDecodeAsNil() { - yyv1874[yyj1874] = Job{} - } else { - yyv1875 := &yyv1874[yyj1874] - yyv1875.CodecDecodeSelf(d) - } - - } - if yyrt1874 { - for ; yyj1874 < yyl1874; yyj1874++ { - yyv1874 = append(yyv1874, Job{}) - yyh1874.ElemContainerState(yyj1874) - if r.TryDecodeAsNil() { - yyv1874[yyj1874] = Job{} - } else { - yyv1876 := &yyv1874[yyj1874] - yyv1876.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj1874 := 0 - for ; !r.CheckBreak(); yyj1874++ { - - if yyj1874 >= len(yyv1874) { - yyv1874 = append(yyv1874, Job{}) // var yyz1874 Job - yyc1874 = true - } - yyh1874.ElemContainerState(yyj1874) - if yyj1874 < len(yyv1874) { - if r.TryDecodeAsNil() { - yyv1874[yyj1874] = Job{} - } else { - yyv1877 := &yyv1874[yyj1874] - yyv1877.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj1874 < len(yyv1874) { - yyv1874 = yyv1874[:yyj1874] - yyc1874 = true - } else if yyj1874 == 0 && yyv1874 == nil { - yyv1874 = []Job{} - yyc1874 = true - } - } - yyh1874.End() - if yyc1874 { - *v = yyv1874 - } -} - -func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv1878 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1879 := &yyv1878 - yy1879.CodecEncodeSelf(e) + for _, yyv1807 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy1808 := &yyv1807 + yy1808.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceJob(v *[]Job, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1880 := *v - yyh1880, yyl1880 := z.DecSliceHelperStart() - var yyc1880 bool - if yyl1880 == 0 { - if yyv1880 == nil { - yyv1880 = []JobCondition{} - yyc1880 = true - } else if len(yyv1880) != 0 { - yyv1880 = yyv1880[:0] - yyc1880 = true + yyv1809 := *v + yyh1809, yyl1809 := z.DecSliceHelperStart() + var yyc1809 bool + if yyl1809 == 0 { + if yyv1809 == nil { + yyv1809 = []Job{} + yyc1809 = true + } else if len(yyv1809) != 0 { + yyv1809 = yyv1809[:0] + yyc1809 = true } - } else if yyl1880 > 0 { - var yyrr1880, yyrl1880 int - var yyrt1880 bool - if yyl1880 > cap(yyv1880) { + } else if yyl1809 > 0 { + var yyrr1809, yyrl1809 int + var yyrt1809 bool + if yyl1809 > cap(yyv1809) { - yyrg1880 := len(yyv1880) > 0 - yyv21880 := yyv1880 - yyrl1880, yyrt1880 = z.DecInferLen(yyl1880, z.DecBasicHandle().MaxInitLen, 112) - if yyrt1880 { - if yyrl1880 <= cap(yyv1880) { - yyv1880 = yyv1880[:yyrl1880] + yyrg1809 := len(yyv1809) > 0 + yyv21809 := yyv1809 
+ yyrl1809, yyrt1809 = z.DecInferLen(yyl1809, z.DecBasicHandle().MaxInitLen, 824) + if yyrt1809 { + if yyrl1809 <= cap(yyv1809) { + yyv1809 = yyv1809[:yyrl1809] } else { - yyv1880 = make([]JobCondition, yyrl1880) + yyv1809 = make([]Job, yyrl1809) } } else { - yyv1880 = make([]JobCondition, yyrl1880) + yyv1809 = make([]Job, yyrl1809) } - yyc1880 = true - yyrr1880 = len(yyv1880) - if yyrg1880 { - copy(yyv1880, yyv21880) + yyc1809 = true + yyrr1809 = len(yyv1809) + if yyrg1809 { + copy(yyv1809, yyv21809) } - } else if yyl1880 != len(yyv1880) { - yyv1880 = yyv1880[:yyl1880] - yyc1880 = true + } else if yyl1809 != len(yyv1809) { + yyv1809 = yyv1809[:yyl1809] + yyc1809 = true } - yyj1880 := 0 - for ; yyj1880 < yyrr1880; yyj1880++ { - yyh1880.ElemContainerState(yyj1880) + yyj1809 := 0 + for ; yyj1809 < yyrr1809; yyj1809++ { + yyh1809.ElemContainerState(yyj1809) if r.TryDecodeAsNil() { - yyv1880[yyj1880] = JobCondition{} + yyv1809[yyj1809] = Job{} } else { - yyv1881 := &yyv1880[yyj1880] - yyv1881.CodecDecodeSelf(d) + yyv1810 := &yyv1809[yyj1809] + yyv1810.CodecDecodeSelf(d) } } - if yyrt1880 { - for ; yyj1880 < yyl1880; yyj1880++ { - yyv1880 = append(yyv1880, JobCondition{}) - yyh1880.ElemContainerState(yyj1880) + if yyrt1809 { + for ; yyj1809 < yyl1809; yyj1809++ { + yyv1809 = append(yyv1809, Job{}) + yyh1809.ElemContainerState(yyj1809) if r.TryDecodeAsNil() { - yyv1880[yyj1880] = JobCondition{} + yyv1809[yyj1809] = Job{} } else { - yyv1882 := &yyv1880[yyj1880] - yyv1882.CodecDecodeSelf(d) + yyv1811 := &yyv1809[yyj1809] + yyv1811.CodecDecodeSelf(d) } } } } else { - yyj1880 := 0 - for ; !r.CheckBreak(); yyj1880++ { + yyj1809 := 0 + for ; !r.CheckBreak(); yyj1809++ { - if yyj1880 >= len(yyv1880) { - yyv1880 = append(yyv1880, JobCondition{}) // var yyz1880 JobCondition - yyc1880 = true + if yyj1809 >= len(yyv1809) { + yyv1809 = append(yyv1809, Job{}) // var yyz1809 Job + yyc1809 = true } - yyh1880.ElemContainerState(yyj1880) - if yyj1880 < len(yyv1880) { + yyh1809.ElemContainerState(yyj1809) + if yyj1809 < len(yyv1809) { if r.TryDecodeAsNil() { - yyv1880[yyj1880] = JobCondition{} + yyv1809[yyj1809] = Job{} } else { - yyv1883 := &yyv1880[yyj1880] - yyv1883.CodecDecodeSelf(d) + yyv1812 := &yyv1809[yyj1809] + yyv1812.CodecDecodeSelf(d) } } else { @@ -22906,115 +22078,115 @@ func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.De } } - if yyj1880 < len(yyv1880) { - yyv1880 = yyv1880[:yyj1880] - yyc1880 = true - } else if yyj1880 == 0 && yyv1880 == nil { - yyv1880 = []JobCondition{} - yyc1880 = true + if yyj1809 < len(yyv1809) { + yyv1809 = yyv1809[:yyj1809] + yyc1809 = true + } else if yyj1809 == 0 && yyv1809 == nil { + yyv1809 = []Job{} + yyc1809 = true } } - yyh1880.End() - if yyc1880 { - *v = yyv1880 + yyh1809.End() + if yyc1809 { + *v = yyv1809 } } -func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceJobCondition(v []JobCondition, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1884 := range v { + for _, yyv1813 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1885 := &yyv1884 - yy1885.CodecEncodeSelf(e) + yy1814 := &yyv1813 + yy1814.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceJobCondition(v *[]JobCondition, d *codec1978.Decoder) { var h 
codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1886 := *v - yyh1886, yyl1886 := z.DecSliceHelperStart() - var yyc1886 bool - if yyl1886 == 0 { - if yyv1886 == nil { - yyv1886 = []Ingress{} - yyc1886 = true - } else if len(yyv1886) != 0 { - yyv1886 = yyv1886[:0] - yyc1886 = true + yyv1815 := *v + yyh1815, yyl1815 := z.DecSliceHelperStart() + var yyc1815 bool + if yyl1815 == 0 { + if yyv1815 == nil { + yyv1815 = []JobCondition{} + yyc1815 = true + } else if len(yyv1815) != 0 { + yyv1815 = yyv1815[:0] + yyc1815 = true } - } else if yyl1886 > 0 { - var yyrr1886, yyrl1886 int - var yyrt1886 bool - if yyl1886 > cap(yyv1886) { + } else if yyl1815 > 0 { + var yyrr1815, yyrl1815 int + var yyrt1815 bool + if yyl1815 > cap(yyv1815) { - yyrg1886 := len(yyv1886) > 0 - yyv21886 := yyv1886 - yyrl1886, yyrt1886 = z.DecInferLen(yyl1886, z.DecBasicHandle().MaxInitLen, 336) - if yyrt1886 { - if yyrl1886 <= cap(yyv1886) { - yyv1886 = yyv1886[:yyrl1886] + yyrg1815 := len(yyv1815) > 0 + yyv21815 := yyv1815 + yyrl1815, yyrt1815 = z.DecInferLen(yyl1815, z.DecBasicHandle().MaxInitLen, 112) + if yyrt1815 { + if yyrl1815 <= cap(yyv1815) { + yyv1815 = yyv1815[:yyrl1815] } else { - yyv1886 = make([]Ingress, yyrl1886) + yyv1815 = make([]JobCondition, yyrl1815) } } else { - yyv1886 = make([]Ingress, yyrl1886) + yyv1815 = make([]JobCondition, yyrl1815) } - yyc1886 = true - yyrr1886 = len(yyv1886) - if yyrg1886 { - copy(yyv1886, yyv21886) + yyc1815 = true + yyrr1815 = len(yyv1815) + if yyrg1815 { + copy(yyv1815, yyv21815) } - } else if yyl1886 != len(yyv1886) { - yyv1886 = yyv1886[:yyl1886] - yyc1886 = true + } else if yyl1815 != len(yyv1815) { + yyv1815 = yyv1815[:yyl1815] + yyc1815 = true } - yyj1886 := 0 - for ; yyj1886 < yyrr1886; yyj1886++ { - yyh1886.ElemContainerState(yyj1886) + yyj1815 := 0 + for ; yyj1815 < yyrr1815; yyj1815++ { + yyh1815.ElemContainerState(yyj1815) if r.TryDecodeAsNil() { - yyv1886[yyj1886] = Ingress{} + yyv1815[yyj1815] = JobCondition{} } else { - yyv1887 := &yyv1886[yyj1886] - yyv1887.CodecDecodeSelf(d) + yyv1816 := &yyv1815[yyj1815] + yyv1816.CodecDecodeSelf(d) } } - if yyrt1886 { - for ; yyj1886 < yyl1886; yyj1886++ { - yyv1886 = append(yyv1886, Ingress{}) - yyh1886.ElemContainerState(yyj1886) + if yyrt1815 { + for ; yyj1815 < yyl1815; yyj1815++ { + yyv1815 = append(yyv1815, JobCondition{}) + yyh1815.ElemContainerState(yyj1815) if r.TryDecodeAsNil() { - yyv1886[yyj1886] = Ingress{} + yyv1815[yyj1815] = JobCondition{} } else { - yyv1888 := &yyv1886[yyj1886] - yyv1888.CodecDecodeSelf(d) + yyv1817 := &yyv1815[yyj1815] + yyv1817.CodecDecodeSelf(d) } } } } else { - yyj1886 := 0 - for ; !r.CheckBreak(); yyj1886++ { + yyj1815 := 0 + for ; !r.CheckBreak(); yyj1815++ { - if yyj1886 >= len(yyv1886) { - yyv1886 = append(yyv1886, Ingress{}) // var yyz1886 Ingress - yyc1886 = true + if yyj1815 >= len(yyv1815) { + yyv1815 = append(yyv1815, JobCondition{}) // var yyz1815 JobCondition + yyc1815 = true } - yyh1886.ElemContainerState(yyj1886) - if yyj1886 < len(yyv1886) { + yyh1815.ElemContainerState(yyj1815) + if yyj1815 < len(yyv1815) { if r.TryDecodeAsNil() { - yyv1886[yyj1886] = Ingress{} + yyv1815[yyj1815] = JobCondition{} } else { - yyv1889 := &yyv1886[yyj1886] - yyv1889.CodecDecodeSelf(d) + yyv1818 := &yyv1815[yyj1815] + yyv1818.CodecDecodeSelf(d) } } else { @@ -23022,115 +22194,115 @@ func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { } } - if yyj1886 < len(yyv1886) { - yyv1886 = yyv1886[:yyj1886] - yyc1886 = true - } else if yyj1886 == 0 && 
yyv1886 == nil { - yyv1886 = []Ingress{} - yyc1886 = true + if yyj1815 < len(yyv1815) { + yyv1815 = yyv1815[:yyj1815] + yyc1815 = true + } else if yyj1815 == 0 && yyv1815 == nil { + yyv1815 = []JobCondition{} + yyc1815 = true } } - yyh1886.End() - if yyc1886 { - *v = yyv1886 + yyh1815.End() + if yyc1815 { + *v = yyv1815 } } -func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIngress(v []Ingress, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1890 := range v { + for _, yyv1819 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1891 := &yyv1890 - yy1891.CodecEncodeSelf(e) + yy1820 := &yyv1819 + yy1820.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIngress(v *[]Ingress, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1892 := *v - yyh1892, yyl1892 := z.DecSliceHelperStart() - var yyc1892 bool - if yyl1892 == 0 { - if yyv1892 == nil { - yyv1892 = []IngressTLS{} - yyc1892 = true - } else if len(yyv1892) != 0 { - yyv1892 = yyv1892[:0] - yyc1892 = true + yyv1821 := *v + yyh1821, yyl1821 := z.DecSliceHelperStart() + var yyc1821 bool + if yyl1821 == 0 { + if yyv1821 == nil { + yyv1821 = []Ingress{} + yyc1821 = true + } else if len(yyv1821) != 0 { + yyv1821 = yyv1821[:0] + yyc1821 = true } - } else if yyl1892 > 0 { - var yyrr1892, yyrl1892 int - var yyrt1892 bool - if yyl1892 > cap(yyv1892) { + } else if yyl1821 > 0 { + var yyrr1821, yyrl1821 int + var yyrt1821 bool + if yyl1821 > cap(yyv1821) { - yyrg1892 := len(yyv1892) > 0 - yyv21892 := yyv1892 - yyrl1892, yyrt1892 = z.DecInferLen(yyl1892, z.DecBasicHandle().MaxInitLen, 40) - if yyrt1892 { - if yyrl1892 <= cap(yyv1892) { - yyv1892 = yyv1892[:yyrl1892] + yyrg1821 := len(yyv1821) > 0 + yyv21821 := yyv1821 + yyrl1821, yyrt1821 = z.DecInferLen(yyl1821, z.DecBasicHandle().MaxInitLen, 336) + if yyrt1821 { + if yyrl1821 <= cap(yyv1821) { + yyv1821 = yyv1821[:yyrl1821] } else { - yyv1892 = make([]IngressTLS, yyrl1892) + yyv1821 = make([]Ingress, yyrl1821) } } else { - yyv1892 = make([]IngressTLS, yyrl1892) + yyv1821 = make([]Ingress, yyrl1821) } - yyc1892 = true - yyrr1892 = len(yyv1892) - if yyrg1892 { - copy(yyv1892, yyv21892) + yyc1821 = true + yyrr1821 = len(yyv1821) + if yyrg1821 { + copy(yyv1821, yyv21821) } - } else if yyl1892 != len(yyv1892) { - yyv1892 = yyv1892[:yyl1892] - yyc1892 = true + } else if yyl1821 != len(yyv1821) { + yyv1821 = yyv1821[:yyl1821] + yyc1821 = true } - yyj1892 := 0 - for ; yyj1892 < yyrr1892; yyj1892++ { - yyh1892.ElemContainerState(yyj1892) + yyj1821 := 0 + for ; yyj1821 < yyrr1821; yyj1821++ { + yyh1821.ElemContainerState(yyj1821) if r.TryDecodeAsNil() { - yyv1892[yyj1892] = IngressTLS{} + yyv1821[yyj1821] = Ingress{} } else { - yyv1893 := &yyv1892[yyj1892] - yyv1893.CodecDecodeSelf(d) + yyv1822 := &yyv1821[yyj1821] + yyv1822.CodecDecodeSelf(d) } } - if yyrt1892 { - for ; yyj1892 < yyl1892; yyj1892++ { - yyv1892 = append(yyv1892, IngressTLS{}) - yyh1892.ElemContainerState(yyj1892) + if yyrt1821 { + for ; yyj1821 < yyl1821; yyj1821++ { + yyv1821 = append(yyv1821, Ingress{}) + yyh1821.ElemContainerState(yyj1821) if r.TryDecodeAsNil() { - yyv1892[yyj1892] = IngressTLS{} + yyv1821[yyj1821] = Ingress{} } else { - yyv1894 := 
&yyv1892[yyj1892] - yyv1894.CodecDecodeSelf(d) + yyv1823 := &yyv1821[yyj1821] + yyv1823.CodecDecodeSelf(d) } } } } else { - yyj1892 := 0 - for ; !r.CheckBreak(); yyj1892++ { + yyj1821 := 0 + for ; !r.CheckBreak(); yyj1821++ { - if yyj1892 >= len(yyv1892) { - yyv1892 = append(yyv1892, IngressTLS{}) // var yyz1892 IngressTLS - yyc1892 = true + if yyj1821 >= len(yyv1821) { + yyv1821 = append(yyv1821, Ingress{}) // var yyz1821 Ingress + yyc1821 = true } - yyh1892.ElemContainerState(yyj1892) - if yyj1892 < len(yyv1892) { + yyh1821.ElemContainerState(yyj1821) + if yyj1821 < len(yyv1821) { if r.TryDecodeAsNil() { - yyv1892[yyj1892] = IngressTLS{} + yyv1821[yyj1821] = Ingress{} } else { - yyv1895 := &yyv1892[yyj1892] - yyv1895.CodecDecodeSelf(d) + yyv1824 := &yyv1821[yyj1821] + yyv1824.CodecDecodeSelf(d) } } else { @@ -23138,115 +22310,115 @@ func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decode } } - if yyj1892 < len(yyv1892) { - yyv1892 = yyv1892[:yyj1892] - yyc1892 = true - } else if yyj1892 == 0 && yyv1892 == nil { - yyv1892 = []IngressTLS{} - yyc1892 = true + if yyj1821 < len(yyv1821) { + yyv1821 = yyv1821[:yyj1821] + yyc1821 = true + } else if yyj1821 == 0 && yyv1821 == nil { + yyv1821 = []Ingress{} + yyc1821 = true } } - yyh1892.End() - if yyc1892 { - *v = yyv1892 + yyh1821.End() + if yyc1821 { + *v = yyv1821 } } -func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIngressTLS(v []IngressTLS, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1896 := range v { + for _, yyv1825 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1897 := &yyv1896 - yy1897.CodecEncodeSelf(e) + yy1826 := &yyv1825 + yy1826.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIngressTLS(v *[]IngressTLS, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1898 := *v - yyh1898, yyl1898 := z.DecSliceHelperStart() - var yyc1898 bool - if yyl1898 == 0 { - if yyv1898 == nil { - yyv1898 = []IngressRule{} - yyc1898 = true - } else if len(yyv1898) != 0 { - yyv1898 = yyv1898[:0] - yyc1898 = true + yyv1827 := *v + yyh1827, yyl1827 := z.DecSliceHelperStart() + var yyc1827 bool + if yyl1827 == 0 { + if yyv1827 == nil { + yyv1827 = []IngressTLS{} + yyc1827 = true + } else if len(yyv1827) != 0 { + yyv1827 = yyv1827[:0] + yyc1827 = true } - } else if yyl1898 > 0 { - var yyrr1898, yyrl1898 int - var yyrt1898 bool - if yyl1898 > cap(yyv1898) { + } else if yyl1827 > 0 { + var yyrr1827, yyrl1827 int + var yyrt1827 bool + if yyl1827 > cap(yyv1827) { - yyrg1898 := len(yyv1898) > 0 - yyv21898 := yyv1898 - yyrl1898, yyrt1898 = z.DecInferLen(yyl1898, z.DecBasicHandle().MaxInitLen, 24) - if yyrt1898 { - if yyrl1898 <= cap(yyv1898) { - yyv1898 = yyv1898[:yyrl1898] + yyrg1827 := len(yyv1827) > 0 + yyv21827 := yyv1827 + yyrl1827, yyrt1827 = z.DecInferLen(yyl1827, z.DecBasicHandle().MaxInitLen, 40) + if yyrt1827 { + if yyrl1827 <= cap(yyv1827) { + yyv1827 = yyv1827[:yyrl1827] } else { - yyv1898 = make([]IngressRule, yyrl1898) + yyv1827 = make([]IngressTLS, yyrl1827) } } else { - yyv1898 = make([]IngressRule, yyrl1898) + yyv1827 = make([]IngressTLS, yyrl1827) } - yyc1898 = true - yyrr1898 = len(yyv1898) - if yyrg1898 { - copy(yyv1898, 
yyv21898) + yyc1827 = true + yyrr1827 = len(yyv1827) + if yyrg1827 { + copy(yyv1827, yyv21827) } - } else if yyl1898 != len(yyv1898) { - yyv1898 = yyv1898[:yyl1898] - yyc1898 = true + } else if yyl1827 != len(yyv1827) { + yyv1827 = yyv1827[:yyl1827] + yyc1827 = true } - yyj1898 := 0 - for ; yyj1898 < yyrr1898; yyj1898++ { - yyh1898.ElemContainerState(yyj1898) + yyj1827 := 0 + for ; yyj1827 < yyrr1827; yyj1827++ { + yyh1827.ElemContainerState(yyj1827) if r.TryDecodeAsNil() { - yyv1898[yyj1898] = IngressRule{} + yyv1827[yyj1827] = IngressTLS{} } else { - yyv1899 := &yyv1898[yyj1898] - yyv1899.CodecDecodeSelf(d) + yyv1828 := &yyv1827[yyj1827] + yyv1828.CodecDecodeSelf(d) } } - if yyrt1898 { - for ; yyj1898 < yyl1898; yyj1898++ { - yyv1898 = append(yyv1898, IngressRule{}) - yyh1898.ElemContainerState(yyj1898) + if yyrt1827 { + for ; yyj1827 < yyl1827; yyj1827++ { + yyv1827 = append(yyv1827, IngressTLS{}) + yyh1827.ElemContainerState(yyj1827) if r.TryDecodeAsNil() { - yyv1898[yyj1898] = IngressRule{} + yyv1827[yyj1827] = IngressTLS{} } else { - yyv1900 := &yyv1898[yyj1898] - yyv1900.CodecDecodeSelf(d) + yyv1829 := &yyv1827[yyj1827] + yyv1829.CodecDecodeSelf(d) } } } } else { - yyj1898 := 0 - for ; !r.CheckBreak(); yyj1898++ { + yyj1827 := 0 + for ; !r.CheckBreak(); yyj1827++ { - if yyj1898 >= len(yyv1898) { - yyv1898 = append(yyv1898, IngressRule{}) // var yyz1898 IngressRule - yyc1898 = true + if yyj1827 >= len(yyv1827) { + yyv1827 = append(yyv1827, IngressTLS{}) // var yyz1827 IngressTLS + yyc1827 = true } - yyh1898.ElemContainerState(yyj1898) - if yyj1898 < len(yyv1898) { + yyh1827.ElemContainerState(yyj1827) + if yyj1827 < len(yyv1827) { if r.TryDecodeAsNil() { - yyv1898[yyj1898] = IngressRule{} + yyv1827[yyj1827] = IngressTLS{} } else { - yyv1901 := &yyv1898[yyj1898] - yyv1901.CodecDecodeSelf(d) + yyv1830 := &yyv1827[yyj1827] + yyv1830.CodecDecodeSelf(d) } } else { @@ -23254,115 +22426,115 @@ func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Deco } } - if yyj1898 < len(yyv1898) { - yyv1898 = yyv1898[:yyj1898] - yyc1898 = true - } else if yyj1898 == 0 && yyv1898 == nil { - yyv1898 = []IngressRule{} - yyc1898 = true + if yyj1827 < len(yyv1827) { + yyv1827 = yyv1827[:yyj1827] + yyc1827 = true + } else if yyj1827 == 0 && yyv1827 == nil { + yyv1827 = []IngressTLS{} + yyc1827 = true } } - yyh1898.End() - if yyc1898 { - *v = yyv1898 + yyh1827.End() + if yyc1827 { + *v = yyv1827 } } -func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIngressRule(v []IngressRule, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1902 := range v { + for _, yyv1831 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1903 := &yyv1902 - yy1903.CodecEncodeSelf(e) + yy1832 := &yyv1831 + yy1832.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIngressRule(v *[]IngressRule, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1904 := *v - yyh1904, yyl1904 := z.DecSliceHelperStart() - var yyc1904 bool - if yyl1904 == 0 { - if yyv1904 == nil { - yyv1904 = []HTTPIngressPath{} - yyc1904 = true - } else if len(yyv1904) != 0 { - yyv1904 = yyv1904[:0] - yyc1904 = true - } - } else if yyl1904 > 0 { - var 
yyrr1904, yyrl1904 int - var yyrt1904 bool - if yyl1904 > cap(yyv1904) { - - yyrg1904 := len(yyv1904) > 0 - yyv21904 := yyv1904 - yyrl1904, yyrt1904 = z.DecInferLen(yyl1904, z.DecBasicHandle().MaxInitLen, 64) - if yyrt1904 { - if yyrl1904 <= cap(yyv1904) { - yyv1904 = yyv1904[:yyrl1904] + yyv1833 := *v + yyh1833, yyl1833 := z.DecSliceHelperStart() + var yyc1833 bool + if yyl1833 == 0 { + if yyv1833 == nil { + yyv1833 = []IngressRule{} + yyc1833 = true + } else if len(yyv1833) != 0 { + yyv1833 = yyv1833[:0] + yyc1833 = true + } + } else if yyl1833 > 0 { + var yyrr1833, yyrl1833 int + var yyrt1833 bool + if yyl1833 > cap(yyv1833) { + + yyrg1833 := len(yyv1833) > 0 + yyv21833 := yyv1833 + yyrl1833, yyrt1833 = z.DecInferLen(yyl1833, z.DecBasicHandle().MaxInitLen, 24) + if yyrt1833 { + if yyrl1833 <= cap(yyv1833) { + yyv1833 = yyv1833[:yyrl1833] } else { - yyv1904 = make([]HTTPIngressPath, yyrl1904) + yyv1833 = make([]IngressRule, yyrl1833) } } else { - yyv1904 = make([]HTTPIngressPath, yyrl1904) + yyv1833 = make([]IngressRule, yyrl1833) } - yyc1904 = true - yyrr1904 = len(yyv1904) - if yyrg1904 { - copy(yyv1904, yyv21904) + yyc1833 = true + yyrr1833 = len(yyv1833) + if yyrg1833 { + copy(yyv1833, yyv21833) } - } else if yyl1904 != len(yyv1904) { - yyv1904 = yyv1904[:yyl1904] - yyc1904 = true + } else if yyl1833 != len(yyv1833) { + yyv1833 = yyv1833[:yyl1833] + yyc1833 = true } - yyj1904 := 0 - for ; yyj1904 < yyrr1904; yyj1904++ { - yyh1904.ElemContainerState(yyj1904) + yyj1833 := 0 + for ; yyj1833 < yyrr1833; yyj1833++ { + yyh1833.ElemContainerState(yyj1833) if r.TryDecodeAsNil() { - yyv1904[yyj1904] = HTTPIngressPath{} + yyv1833[yyj1833] = IngressRule{} } else { - yyv1905 := &yyv1904[yyj1904] - yyv1905.CodecDecodeSelf(d) + yyv1834 := &yyv1833[yyj1833] + yyv1834.CodecDecodeSelf(d) } } - if yyrt1904 { - for ; yyj1904 < yyl1904; yyj1904++ { - yyv1904 = append(yyv1904, HTTPIngressPath{}) - yyh1904.ElemContainerState(yyj1904) + if yyrt1833 { + for ; yyj1833 < yyl1833; yyj1833++ { + yyv1833 = append(yyv1833, IngressRule{}) + yyh1833.ElemContainerState(yyj1833) if r.TryDecodeAsNil() { - yyv1904[yyj1904] = HTTPIngressPath{} + yyv1833[yyj1833] = IngressRule{} } else { - yyv1906 := &yyv1904[yyj1904] - yyv1906.CodecDecodeSelf(d) + yyv1835 := &yyv1833[yyj1833] + yyv1835.CodecDecodeSelf(d) } } } } else { - yyj1904 := 0 - for ; !r.CheckBreak(); yyj1904++ { + yyj1833 := 0 + for ; !r.CheckBreak(); yyj1833++ { - if yyj1904 >= len(yyv1904) { - yyv1904 = append(yyv1904, HTTPIngressPath{}) // var yyz1904 HTTPIngressPath - yyc1904 = true + if yyj1833 >= len(yyv1833) { + yyv1833 = append(yyv1833, IngressRule{}) // var yyz1833 IngressRule + yyc1833 = true } - yyh1904.ElemContainerState(yyj1904) - if yyj1904 < len(yyv1904) { + yyh1833.ElemContainerState(yyj1833) + if yyj1833 < len(yyv1833) { if r.TryDecodeAsNil() { - yyv1904[yyj1904] = HTTPIngressPath{} + yyv1833[yyj1833] = IngressRule{} } else { - yyv1907 := &yyv1904[yyj1904] - yyv1907.CodecDecodeSelf(d) + yyv1836 := &yyv1833[yyj1833] + yyv1836.CodecDecodeSelf(d) } } else { @@ -23370,115 +22542,115 @@ func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1 } } - if yyj1904 < len(yyv1904) { - yyv1904 = yyv1904[:yyj1904] - yyc1904 = true - } else if yyj1904 == 0 && yyv1904 == nil { - yyv1904 = []HTTPIngressPath{} - yyc1904 = true + if yyj1833 < len(yyv1833) { + yyv1833 = yyv1833[:yyj1833] + yyc1833 = true + } else if yyj1833 == 0 && yyv1833 == nil { + yyv1833 = []IngressRule{} + yyc1833 = true } } - yyh1904.End() - if yyc1904 { - *v = yyv1904 
+ yyh1833.End() + if yyc1833 { + *v = yyv1833 } } -func (x codecSelfer1234) encSliceLabelSelectorRequirement(v []LabelSelectorRequirement, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceHTTPIngressPath(v []HTTPIngressPath, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1908 := range v { + for _, yyv1837 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1909 := &yyv1908 - yy1909.CodecEncodeSelf(e) + yy1838 := &yyv1837 + yy1838.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequirement, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceHTTPIngressPath(v *[]HTTPIngressPath, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1910 := *v - yyh1910, yyl1910 := z.DecSliceHelperStart() - var yyc1910 bool - if yyl1910 == 0 { - if yyv1910 == nil { - yyv1910 = []LabelSelectorRequirement{} - yyc1910 = true - } else if len(yyv1910) != 0 { - yyv1910 = yyv1910[:0] - yyc1910 = true + yyv1839 := *v + yyh1839, yyl1839 := z.DecSliceHelperStart() + var yyc1839 bool + if yyl1839 == 0 { + if yyv1839 == nil { + yyv1839 = []HTTPIngressPath{} + yyc1839 = true + } else if len(yyv1839) != 0 { + yyv1839 = yyv1839[:0] + yyc1839 = true } - } else if yyl1910 > 0 { - var yyrr1910, yyrl1910 int - var yyrt1910 bool - if yyl1910 > cap(yyv1910) { + } else if yyl1839 > 0 { + var yyrr1839, yyrl1839 int + var yyrt1839 bool + if yyl1839 > cap(yyv1839) { - yyrg1910 := len(yyv1910) > 0 - yyv21910 := yyv1910 - yyrl1910, yyrt1910 = z.DecInferLen(yyl1910, z.DecBasicHandle().MaxInitLen, 56) - if yyrt1910 { - if yyrl1910 <= cap(yyv1910) { - yyv1910 = yyv1910[:yyrl1910] + yyrg1839 := len(yyv1839) > 0 + yyv21839 := yyv1839 + yyrl1839, yyrt1839 = z.DecInferLen(yyl1839, z.DecBasicHandle().MaxInitLen, 64) + if yyrt1839 { + if yyrl1839 <= cap(yyv1839) { + yyv1839 = yyv1839[:yyrl1839] } else { - yyv1910 = make([]LabelSelectorRequirement, yyrl1910) + yyv1839 = make([]HTTPIngressPath, yyrl1839) } } else { - yyv1910 = make([]LabelSelectorRequirement, yyrl1910) + yyv1839 = make([]HTTPIngressPath, yyrl1839) } - yyc1910 = true - yyrr1910 = len(yyv1910) - if yyrg1910 { - copy(yyv1910, yyv21910) + yyc1839 = true + yyrr1839 = len(yyv1839) + if yyrg1839 { + copy(yyv1839, yyv21839) } - } else if yyl1910 != len(yyv1910) { - yyv1910 = yyv1910[:yyl1910] - yyc1910 = true + } else if yyl1839 != len(yyv1839) { + yyv1839 = yyv1839[:yyl1839] + yyc1839 = true } - yyj1910 := 0 - for ; yyj1910 < yyrr1910; yyj1910++ { - yyh1910.ElemContainerState(yyj1910) + yyj1839 := 0 + for ; yyj1839 < yyrr1839; yyj1839++ { + yyh1839.ElemContainerState(yyj1839) if r.TryDecodeAsNil() { - yyv1910[yyj1910] = LabelSelectorRequirement{} + yyv1839[yyj1839] = HTTPIngressPath{} } else { - yyv1911 := &yyv1910[yyj1910] - yyv1911.CodecDecodeSelf(d) + yyv1840 := &yyv1839[yyj1839] + yyv1840.CodecDecodeSelf(d) } } - if yyrt1910 { - for ; yyj1910 < yyl1910; yyj1910++ { - yyv1910 = append(yyv1910, LabelSelectorRequirement{}) - yyh1910.ElemContainerState(yyj1910) + if yyrt1839 { + for ; yyj1839 < yyl1839; yyj1839++ { + yyv1839 = append(yyv1839, HTTPIngressPath{}) + yyh1839.ElemContainerState(yyj1839) if r.TryDecodeAsNil() { - yyv1910[yyj1910] = LabelSelectorRequirement{} + yyv1839[yyj1839] = HTTPIngressPath{} } else { - yyv1912 := &yyv1910[yyj1910] - yyv1912.CodecDecodeSelf(d) + yyv1841 := 
&yyv1839[yyj1839] + yyv1841.CodecDecodeSelf(d) } } } } else { - yyj1910 := 0 - for ; !r.CheckBreak(); yyj1910++ { + yyj1839 := 0 + for ; !r.CheckBreak(); yyj1839++ { - if yyj1910 >= len(yyv1910) { - yyv1910 = append(yyv1910, LabelSelectorRequirement{}) // var yyz1910 LabelSelectorRequirement - yyc1910 = true + if yyj1839 >= len(yyv1839) { + yyv1839 = append(yyv1839, HTTPIngressPath{}) // var yyz1839 HTTPIngressPath + yyc1839 = true } - yyh1910.ElemContainerState(yyj1910) - if yyj1910 < len(yyv1910) { + yyh1839.ElemContainerState(yyj1839) + if yyj1839 < len(yyv1839) { if r.TryDecodeAsNil() { - yyv1910[yyj1910] = LabelSelectorRequirement{} + yyv1839[yyj1839] = HTTPIngressPath{} } else { - yyv1913 := &yyv1910[yyj1910] - yyv1913.CodecDecodeSelf(d) + yyv1842 := &yyv1839[yyj1839] + yyv1842.CodecDecodeSelf(d) } } else { @@ -23486,115 +22658,115 @@ func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequ } } - if yyj1910 < len(yyv1910) { - yyv1910 = yyv1910[:yyj1910] - yyc1910 = true - } else if yyj1910 == 0 && yyv1910 == nil { - yyv1910 = []LabelSelectorRequirement{} - yyc1910 = true + if yyj1839 < len(yyv1839) { + yyv1839 = yyv1839[:yyj1839] + yyc1839 = true + } else if yyj1839 == 0 && yyv1839 == nil { + yyv1839 = []HTTPIngressPath{} + yyc1839 = true } } - yyh1910.End() - if yyc1910 { - *v = yyv1910 + yyh1839.End() + if yyc1839 { + *v = yyv1839 } } -func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceLabelSelectorRequirement(v []LabelSelectorRequirement, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1914 := range v { + for _, yyv1843 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1915 := &yyv1914 - yy1915.CodecEncodeSelf(e) + yy1844 := &yyv1843 + yy1844.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceLabelSelectorRequirement(v *[]LabelSelectorRequirement, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1916 := *v - yyh1916, yyl1916 := z.DecSliceHelperStart() - var yyc1916 bool - if yyl1916 == 0 { - if yyv1916 == nil { - yyv1916 = []ReplicaSet{} - yyc1916 = true - } else if len(yyv1916) != 0 { - yyv1916 = yyv1916[:0] - yyc1916 = true + yyv1845 := *v + yyh1845, yyl1845 := z.DecSliceHelperStart() + var yyc1845 bool + if yyl1845 == 0 { + if yyv1845 == nil { + yyv1845 = []LabelSelectorRequirement{} + yyc1845 = true + } else if len(yyv1845) != 0 { + yyv1845 = yyv1845[:0] + yyc1845 = true } - } else if yyl1916 > 0 { - var yyrr1916, yyrl1916 int - var yyrt1916 bool - if yyl1916 > cap(yyv1916) { + } else if yyl1845 > 0 { + var yyrr1845, yyrl1845 int + var yyrt1845 bool + if yyl1845 > cap(yyv1845) { - yyrg1916 := len(yyv1916) > 0 - yyv21916 := yyv1916 - yyrl1916, yyrt1916 = z.DecInferLen(yyl1916, z.DecBasicHandle().MaxInitLen, 768) - if yyrt1916 { - if yyrl1916 <= cap(yyv1916) { - yyv1916 = yyv1916[:yyrl1916] + yyrg1845 := len(yyv1845) > 0 + yyv21845 := yyv1845 + yyrl1845, yyrt1845 = z.DecInferLen(yyl1845, z.DecBasicHandle().MaxInitLen, 56) + if yyrt1845 { + if yyrl1845 <= cap(yyv1845) { + yyv1845 = yyv1845[:yyrl1845] } else { - yyv1916 = make([]ReplicaSet, yyrl1916) + yyv1845 = make([]LabelSelectorRequirement, yyrl1845) } } else { - yyv1916 = make([]ReplicaSet, yyrl1916) + yyv1845 = 
make([]LabelSelectorRequirement, yyrl1845) } - yyc1916 = true - yyrr1916 = len(yyv1916) - if yyrg1916 { - copy(yyv1916, yyv21916) + yyc1845 = true + yyrr1845 = len(yyv1845) + if yyrg1845 { + copy(yyv1845, yyv21845) } - } else if yyl1916 != len(yyv1916) { - yyv1916 = yyv1916[:yyl1916] - yyc1916 = true + } else if yyl1845 != len(yyv1845) { + yyv1845 = yyv1845[:yyl1845] + yyc1845 = true } - yyj1916 := 0 - for ; yyj1916 < yyrr1916; yyj1916++ { - yyh1916.ElemContainerState(yyj1916) + yyj1845 := 0 + for ; yyj1845 < yyrr1845; yyj1845++ { + yyh1845.ElemContainerState(yyj1845) if r.TryDecodeAsNil() { - yyv1916[yyj1916] = ReplicaSet{} + yyv1845[yyj1845] = LabelSelectorRequirement{} } else { - yyv1917 := &yyv1916[yyj1916] - yyv1917.CodecDecodeSelf(d) + yyv1846 := &yyv1845[yyj1845] + yyv1846.CodecDecodeSelf(d) } } - if yyrt1916 { - for ; yyj1916 < yyl1916; yyj1916++ { - yyv1916 = append(yyv1916, ReplicaSet{}) - yyh1916.ElemContainerState(yyj1916) + if yyrt1845 { + for ; yyj1845 < yyl1845; yyj1845++ { + yyv1845 = append(yyv1845, LabelSelectorRequirement{}) + yyh1845.ElemContainerState(yyj1845) if r.TryDecodeAsNil() { - yyv1916[yyj1916] = ReplicaSet{} + yyv1845[yyj1845] = LabelSelectorRequirement{} } else { - yyv1918 := &yyv1916[yyj1916] - yyv1918.CodecDecodeSelf(d) + yyv1847 := &yyv1845[yyj1845] + yyv1847.CodecDecodeSelf(d) } } } } else { - yyj1916 := 0 - for ; !r.CheckBreak(); yyj1916++ { + yyj1845 := 0 + for ; !r.CheckBreak(); yyj1845++ { - if yyj1916 >= len(yyv1916) { - yyv1916 = append(yyv1916, ReplicaSet{}) // var yyz1916 ReplicaSet - yyc1916 = true + if yyj1845 >= len(yyv1845) { + yyv1845 = append(yyv1845, LabelSelectorRequirement{}) // var yyz1845 LabelSelectorRequirement + yyc1845 = true } - yyh1916.ElemContainerState(yyj1916) - if yyj1916 < len(yyv1916) { + yyh1845.ElemContainerState(yyj1845) + if yyj1845 < len(yyv1845) { if r.TryDecodeAsNil() { - yyv1916[yyj1916] = ReplicaSet{} + yyv1845[yyj1845] = LabelSelectorRequirement{} } else { - yyv1919 := &yyv1916[yyj1916] - yyv1919.CodecDecodeSelf(d) + yyv1848 := &yyv1845[yyj1845] + yyv1848.CodecDecodeSelf(d) } } else { @@ -23602,112 +22774,115 @@ func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decode } } - if yyj1916 < len(yyv1916) { - yyv1916 = yyv1916[:yyj1916] - yyc1916 = true - } else if yyj1916 == 0 && yyv1916 == nil { - yyv1916 = []ReplicaSet{} - yyc1916 = true + if yyj1845 < len(yyv1845) { + yyv1845 = yyv1845[:yyj1845] + yyc1845 = true + } else if yyj1845 == 0 && yyv1845 == nil { + yyv1845 = []LabelSelectorRequirement{} + yyc1845 = true } } - yyh1916.End() - if yyc1916 { - *v = yyv1916 + yyh1845.End() + if yyc1845 { + *v = yyv1845 } } -func (x codecSelfer1234) encSlicev1_Capability(v []pkg2_v1.Capability, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceReplicaSet(v []ReplicaSet, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1920 := range v { + for _, yyv1849 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym1921 := z.EncBinary() - _ = yym1921 - if false { - } else if z.HasExtensions() && z.EncExt(yyv1920) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyv1920)) - } + yy1850 := &yyv1849 + yy1850.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceReplicaSet(v *[]ReplicaSet, d *codec1978.Decoder) { var h 
codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1922 := *v - yyh1922, yyl1922 := z.DecSliceHelperStart() - var yyc1922 bool - if yyl1922 == 0 { - if yyv1922 == nil { - yyv1922 = []pkg2_v1.Capability{} - yyc1922 = true - } else if len(yyv1922) != 0 { - yyv1922 = yyv1922[:0] - yyc1922 = true + yyv1851 := *v + yyh1851, yyl1851 := z.DecSliceHelperStart() + var yyc1851 bool + if yyl1851 == 0 { + if yyv1851 == nil { + yyv1851 = []ReplicaSet{} + yyc1851 = true + } else if len(yyv1851) != 0 { + yyv1851 = yyv1851[:0] + yyc1851 = true } - } else if yyl1922 > 0 { - var yyrr1922, yyrl1922 int - var yyrt1922 bool - if yyl1922 > cap(yyv1922) { + } else if yyl1851 > 0 { + var yyrr1851, yyrl1851 int + var yyrt1851 bool + if yyl1851 > cap(yyv1851) { - yyrl1922, yyrt1922 = z.DecInferLen(yyl1922, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1922 { - if yyrl1922 <= cap(yyv1922) { - yyv1922 = yyv1922[:yyrl1922] + yyrg1851 := len(yyv1851) > 0 + yyv21851 := yyv1851 + yyrl1851, yyrt1851 = z.DecInferLen(yyl1851, z.DecBasicHandle().MaxInitLen, 768) + if yyrt1851 { + if yyrl1851 <= cap(yyv1851) { + yyv1851 = yyv1851[:yyrl1851] } else { - yyv1922 = make([]pkg2_v1.Capability, yyrl1922) + yyv1851 = make([]ReplicaSet, yyrl1851) } } else { - yyv1922 = make([]pkg2_v1.Capability, yyrl1922) + yyv1851 = make([]ReplicaSet, yyrl1851) + } + yyc1851 = true + yyrr1851 = len(yyv1851) + if yyrg1851 { + copy(yyv1851, yyv21851) } - yyc1922 = true - yyrr1922 = len(yyv1922) - } else if yyl1922 != len(yyv1922) { - yyv1922 = yyv1922[:yyl1922] - yyc1922 = true + } else if yyl1851 != len(yyv1851) { + yyv1851 = yyv1851[:yyl1851] + yyc1851 = true } - yyj1922 := 0 - for ; yyj1922 < yyrr1922; yyj1922++ { - yyh1922.ElemContainerState(yyj1922) + yyj1851 := 0 + for ; yyj1851 < yyrr1851; yyj1851++ { + yyh1851.ElemContainerState(yyj1851) if r.TryDecodeAsNil() { - yyv1922[yyj1922] = "" + yyv1851[yyj1851] = ReplicaSet{} } else { - yyv1922[yyj1922] = pkg2_v1.Capability(r.DecodeString()) + yyv1852 := &yyv1851[yyj1851] + yyv1852.CodecDecodeSelf(d) } } - if yyrt1922 { - for ; yyj1922 < yyl1922; yyj1922++ { - yyv1922 = append(yyv1922, "") - yyh1922.ElemContainerState(yyj1922) + if yyrt1851 { + for ; yyj1851 < yyl1851; yyj1851++ { + yyv1851 = append(yyv1851, ReplicaSet{}) + yyh1851.ElemContainerState(yyj1851) if r.TryDecodeAsNil() { - yyv1922[yyj1922] = "" + yyv1851[yyj1851] = ReplicaSet{} } else { - yyv1922[yyj1922] = pkg2_v1.Capability(r.DecodeString()) + yyv1853 := &yyv1851[yyj1851] + yyv1853.CodecDecodeSelf(d) } } } } else { - yyj1922 := 0 - for ; !r.CheckBreak(); yyj1922++ { + yyj1851 := 0 + for ; !r.CheckBreak(); yyj1851++ { - if yyj1922 >= len(yyv1922) { - yyv1922 = append(yyv1922, "") // var yyz1922 pkg2_v1.Capability - yyc1922 = true + if yyj1851 >= len(yyv1851) { + yyv1851 = append(yyv1851, ReplicaSet{}) // var yyz1851 ReplicaSet + yyc1851 = true } - yyh1922.ElemContainerState(yyj1922) - if yyj1922 < len(yyv1922) { + yyh1851.ElemContainerState(yyj1851) + if yyj1851 < len(yyv1851) { if r.TryDecodeAsNil() { - yyv1922[yyj1922] = "" + yyv1851[yyj1851] = ReplicaSet{} } else { - yyv1922[yyj1922] = pkg2_v1.Capability(r.DecodeString()) + yyv1854 := &yyv1851[yyj1851] + yyv1854.CodecDecodeSelf(d) } } else { @@ -23715,106 +22890,112 @@ func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec } } - if yyj1922 < len(yyv1922) { - yyv1922 = yyv1922[:yyj1922] - yyc1922 = true - } else if yyj1922 == 0 && yyv1922 == nil { - yyv1922 = []pkg2_v1.Capability{} - yyc1922 = true + if yyj1851 < len(yyv1851) { + yyv1851 
= yyv1851[:yyj1851] + yyc1851 = true + } else if yyj1851 == 0 && yyv1851 == nil { + yyv1851 = []ReplicaSet{} + yyc1851 = true } } - yyh1922.End() - if yyc1922 { - *v = yyv1922 + yyh1851.End() + if yyc1851 { + *v = yyv1851 } } -func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { +func (x codecSelfer1234) encSlicev1_Capability(v []pkg2_v1.Capability, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1926 := range v { + for _, yyv1855 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yyv1926.CodecEncodeSelf(e) + yym1856 := z.EncBinary() + _ = yym1856 + if false { + } else if z.HasExtensions() && z.EncExt(yyv1855) { + } else { + r.EncodeString(codecSelferC_UTF81234, string(yyv1855)) + } } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { +func (x codecSelfer1234) decSlicev1_Capability(v *[]pkg2_v1.Capability, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1927 := *v - yyh1927, yyl1927 := z.DecSliceHelperStart() - var yyc1927 bool - if yyl1927 == 0 { - if yyv1927 == nil { - yyv1927 = []FSType{} - yyc1927 = true - } else if len(yyv1927) != 0 { - yyv1927 = yyv1927[:0] - yyc1927 = true + yyv1857 := *v + yyh1857, yyl1857 := z.DecSliceHelperStart() + var yyc1857 bool + if yyl1857 == 0 { + if yyv1857 == nil { + yyv1857 = []pkg2_v1.Capability{} + yyc1857 = true + } else if len(yyv1857) != 0 { + yyv1857 = yyv1857[:0] + yyc1857 = true } - } else if yyl1927 > 0 { - var yyrr1927, yyrl1927 int - var yyrt1927 bool - if yyl1927 > cap(yyv1927) { + } else if yyl1857 > 0 { + var yyrr1857, yyrl1857 int + var yyrt1857 bool + if yyl1857 > cap(yyv1857) { - yyrl1927, yyrt1927 = z.DecInferLen(yyl1927, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1927 { - if yyrl1927 <= cap(yyv1927) { - yyv1927 = yyv1927[:yyrl1927] + yyrl1857, yyrt1857 = z.DecInferLen(yyl1857, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1857 { + if yyrl1857 <= cap(yyv1857) { + yyv1857 = yyv1857[:yyrl1857] } else { - yyv1927 = make([]FSType, yyrl1927) + yyv1857 = make([]pkg2_v1.Capability, yyrl1857) } } else { - yyv1927 = make([]FSType, yyrl1927) + yyv1857 = make([]pkg2_v1.Capability, yyrl1857) } - yyc1927 = true - yyrr1927 = len(yyv1927) - } else if yyl1927 != len(yyv1927) { - yyv1927 = yyv1927[:yyl1927] - yyc1927 = true + yyc1857 = true + yyrr1857 = len(yyv1857) + } else if yyl1857 != len(yyv1857) { + yyv1857 = yyv1857[:yyl1857] + yyc1857 = true } - yyj1927 := 0 - for ; yyj1927 < yyrr1927; yyj1927++ { - yyh1927.ElemContainerState(yyj1927) + yyj1857 := 0 + for ; yyj1857 < yyrr1857; yyj1857++ { + yyh1857.ElemContainerState(yyj1857) if r.TryDecodeAsNil() { - yyv1927[yyj1927] = "" + yyv1857[yyj1857] = "" } else { - yyv1927[yyj1927] = FSType(r.DecodeString()) + yyv1857[yyj1857] = pkg2_v1.Capability(r.DecodeString()) } } - if yyrt1927 { - for ; yyj1927 < yyl1927; yyj1927++ { - yyv1927 = append(yyv1927, "") - yyh1927.ElemContainerState(yyj1927) + if yyrt1857 { + for ; yyj1857 < yyl1857; yyj1857++ { + yyv1857 = append(yyv1857, "") + yyh1857.ElemContainerState(yyj1857) if r.TryDecodeAsNil() { - yyv1927[yyj1927] = "" + yyv1857[yyj1857] = "" } else { - yyv1927[yyj1927] = FSType(r.DecodeString()) + yyv1857[yyj1857] = pkg2_v1.Capability(r.DecodeString()) } } } } else { - yyj1927 := 0 - for ; !r.CheckBreak(); yyj1927++ { + yyj1857 := 0 + for ; !r.CheckBreak(); yyj1857++ { - if 
yyj1927 >= len(yyv1927) { - yyv1927 = append(yyv1927, "") // var yyz1927 FSType - yyc1927 = true + if yyj1857 >= len(yyv1857) { + yyv1857 = append(yyv1857, "") // var yyz1857 pkg2_v1.Capability + yyc1857 = true } - yyh1927.ElemContainerState(yyj1927) - if yyj1927 < len(yyv1927) { + yyh1857.ElemContainerState(yyj1857) + if yyj1857 < len(yyv1857) { if r.TryDecodeAsNil() { - yyv1927[yyj1927] = "" + yyv1857[yyj1857] = "" } else { - yyv1927[yyj1927] = FSType(r.DecodeString()) + yyv1857[yyj1857] = pkg2_v1.Capability(r.DecodeString()) } } else { @@ -23822,115 +23003,106 @@ func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { } } - if yyj1927 < len(yyv1927) { - yyv1927 = yyv1927[:yyj1927] - yyc1927 = true - } else if yyj1927 == 0 && yyv1927 == nil { - yyv1927 = []FSType{} - yyc1927 = true + if yyj1857 < len(yyv1857) { + yyv1857 = yyv1857[:yyj1857] + yyc1857 = true + } else if yyj1857 == 0 && yyv1857 == nil { + yyv1857 = []pkg2_v1.Capability{} + yyc1857 = true } } - yyh1927.End() - if yyc1927 { - *v = yyv1927 + yyh1857.End() + if yyc1857 { + *v = yyv1857 } } -func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceFSType(v []FSType, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1931 := range v { + for _, yyv1861 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1932 := &yyv1931 - yy1932.CodecEncodeSelf(e) + yyv1861.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceFSType(v *[]FSType, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1933 := *v - yyh1933, yyl1933 := z.DecSliceHelperStart() - var yyc1933 bool - if yyl1933 == 0 { - if yyv1933 == nil { - yyv1933 = []HostPortRange{} - yyc1933 = true - } else if len(yyv1933) != 0 { - yyv1933 = yyv1933[:0] - yyc1933 = true + yyv1862 := *v + yyh1862, yyl1862 := z.DecSliceHelperStart() + var yyc1862 bool + if yyl1862 == 0 { + if yyv1862 == nil { + yyv1862 = []FSType{} + yyc1862 = true + } else if len(yyv1862) != 0 { + yyv1862 = yyv1862[:0] + yyc1862 = true } - } else if yyl1933 > 0 { - var yyrr1933, yyrl1933 int - var yyrt1933 bool - if yyl1933 > cap(yyv1933) { + } else if yyl1862 > 0 { + var yyrr1862, yyrl1862 int + var yyrt1862 bool + if yyl1862 > cap(yyv1862) { - yyrg1933 := len(yyv1933) > 0 - yyv21933 := yyv1933 - yyrl1933, yyrt1933 = z.DecInferLen(yyl1933, z.DecBasicHandle().MaxInitLen, 8) - if yyrt1933 { - if yyrl1933 <= cap(yyv1933) { - yyv1933 = yyv1933[:yyrl1933] + yyrl1862, yyrt1862 = z.DecInferLen(yyl1862, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1862 { + if yyrl1862 <= cap(yyv1862) { + yyv1862 = yyv1862[:yyrl1862] } else { - yyv1933 = make([]HostPortRange, yyrl1933) + yyv1862 = make([]FSType, yyrl1862) } } else { - yyv1933 = make([]HostPortRange, yyrl1933) + yyv1862 = make([]FSType, yyrl1862) } - yyc1933 = true - yyrr1933 = len(yyv1933) - if yyrg1933 { - copy(yyv1933, yyv21933) - } - } else if yyl1933 != len(yyv1933) { - yyv1933 = yyv1933[:yyl1933] - yyc1933 = true + yyc1862 = true + yyrr1862 = len(yyv1862) + } else if yyl1862 != len(yyv1862) { + yyv1862 = yyv1862[:yyl1862] + yyc1862 = true } - yyj1933 := 0 - for ; yyj1933 < yyrr1933; yyj1933++ { - yyh1933.ElemContainerState(yyj1933) + yyj1862 := 0 + for ; yyj1862 < 
yyrr1862; yyj1862++ { + yyh1862.ElemContainerState(yyj1862) if r.TryDecodeAsNil() { - yyv1933[yyj1933] = HostPortRange{} + yyv1862[yyj1862] = "" } else { - yyv1934 := &yyv1933[yyj1933] - yyv1934.CodecDecodeSelf(d) + yyv1862[yyj1862] = FSType(r.DecodeString()) } } - if yyrt1933 { - for ; yyj1933 < yyl1933; yyj1933++ { - yyv1933 = append(yyv1933, HostPortRange{}) - yyh1933.ElemContainerState(yyj1933) + if yyrt1862 { + for ; yyj1862 < yyl1862; yyj1862++ { + yyv1862 = append(yyv1862, "") + yyh1862.ElemContainerState(yyj1862) if r.TryDecodeAsNil() { - yyv1933[yyj1933] = HostPortRange{} + yyv1862[yyj1862] = "" } else { - yyv1935 := &yyv1933[yyj1933] - yyv1935.CodecDecodeSelf(d) + yyv1862[yyj1862] = FSType(r.DecodeString()) } } } } else { - yyj1933 := 0 - for ; !r.CheckBreak(); yyj1933++ { + yyj1862 := 0 + for ; !r.CheckBreak(); yyj1862++ { - if yyj1933 >= len(yyv1933) { - yyv1933 = append(yyv1933, HostPortRange{}) // var yyz1933 HostPortRange - yyc1933 = true + if yyj1862 >= len(yyv1862) { + yyv1862 = append(yyv1862, "") // var yyz1862 FSType + yyc1862 = true } - yyh1933.ElemContainerState(yyj1933) - if yyj1933 < len(yyv1933) { + yyh1862.ElemContainerState(yyj1862) + if yyj1862 < len(yyv1862) { if r.TryDecodeAsNil() { - yyv1933[yyj1933] = HostPortRange{} + yyv1862[yyj1862] = "" } else { - yyv1936 := &yyv1933[yyj1933] - yyv1936.CodecDecodeSelf(d) + yyv1862[yyj1862] = FSType(r.DecodeString()) } } else { @@ -23938,115 +23110,115 @@ func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978. } } - if yyj1933 < len(yyv1933) { - yyv1933 = yyv1933[:yyj1933] - yyc1933 = true - } else if yyj1933 == 0 && yyv1933 == nil { - yyv1933 = []HostPortRange{} - yyc1933 = true + if yyj1862 < len(yyv1862) { + yyv1862 = yyv1862[:yyj1862] + yyc1862 = true + } else if yyj1862 == 0 && yyv1862 == nil { + yyv1862 = []FSType{} + yyc1862 = true } } - yyh1933.End() - if yyc1933 { - *v = yyv1933 + yyh1862.End() + if yyc1862 { + *v = yyv1862 } } -func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceHostPortRange(v []HostPortRange, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1937 := range v { + for _, yyv1866 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1938 := &yyv1937 - yy1938.CodecEncodeSelf(e) + yy1867 := &yyv1866 + yy1867.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceHostPortRange(v *[]HostPortRange, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1939 := *v - yyh1939, yyl1939 := z.DecSliceHelperStart() - var yyc1939 bool - if yyl1939 == 0 { - if yyv1939 == nil { - yyv1939 = []IDRange{} - yyc1939 = true - } else if len(yyv1939) != 0 { - yyv1939 = yyv1939[:0] - yyc1939 = true + yyv1868 := *v + yyh1868, yyl1868 := z.DecSliceHelperStart() + var yyc1868 bool + if yyl1868 == 0 { + if yyv1868 == nil { + yyv1868 = []HostPortRange{} + yyc1868 = true + } else if len(yyv1868) != 0 { + yyv1868 = yyv1868[:0] + yyc1868 = true } - } else if yyl1939 > 0 { - var yyrr1939, yyrl1939 int - var yyrt1939 bool - if yyl1939 > cap(yyv1939) { + } else if yyl1868 > 0 { + var yyrr1868, yyrl1868 int + var yyrt1868 bool + if yyl1868 > cap(yyv1868) { - yyrg1939 := len(yyv1939) > 0 - yyv21939 := yyv1939 - yyrl1939, yyrt1939 = 
z.DecInferLen(yyl1939, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1939 { - if yyrl1939 <= cap(yyv1939) { - yyv1939 = yyv1939[:yyrl1939] + yyrg1868 := len(yyv1868) > 0 + yyv21868 := yyv1868 + yyrl1868, yyrt1868 = z.DecInferLen(yyl1868, z.DecBasicHandle().MaxInitLen, 8) + if yyrt1868 { + if yyrl1868 <= cap(yyv1868) { + yyv1868 = yyv1868[:yyrl1868] } else { - yyv1939 = make([]IDRange, yyrl1939) + yyv1868 = make([]HostPortRange, yyrl1868) } } else { - yyv1939 = make([]IDRange, yyrl1939) + yyv1868 = make([]HostPortRange, yyrl1868) } - yyc1939 = true - yyrr1939 = len(yyv1939) - if yyrg1939 { - copy(yyv1939, yyv21939) + yyc1868 = true + yyrr1868 = len(yyv1868) + if yyrg1868 { + copy(yyv1868, yyv21868) } - } else if yyl1939 != len(yyv1939) { - yyv1939 = yyv1939[:yyl1939] - yyc1939 = true + } else if yyl1868 != len(yyv1868) { + yyv1868 = yyv1868[:yyl1868] + yyc1868 = true } - yyj1939 := 0 - for ; yyj1939 < yyrr1939; yyj1939++ { - yyh1939.ElemContainerState(yyj1939) + yyj1868 := 0 + for ; yyj1868 < yyrr1868; yyj1868++ { + yyh1868.ElemContainerState(yyj1868) if r.TryDecodeAsNil() { - yyv1939[yyj1939] = IDRange{} + yyv1868[yyj1868] = HostPortRange{} } else { - yyv1940 := &yyv1939[yyj1939] - yyv1940.CodecDecodeSelf(d) + yyv1869 := &yyv1868[yyj1868] + yyv1869.CodecDecodeSelf(d) } } - if yyrt1939 { - for ; yyj1939 < yyl1939; yyj1939++ { - yyv1939 = append(yyv1939, IDRange{}) - yyh1939.ElemContainerState(yyj1939) + if yyrt1868 { + for ; yyj1868 < yyl1868; yyj1868++ { + yyv1868 = append(yyv1868, HostPortRange{}) + yyh1868.ElemContainerState(yyj1868) if r.TryDecodeAsNil() { - yyv1939[yyj1939] = IDRange{} + yyv1868[yyj1868] = HostPortRange{} } else { - yyv1941 := &yyv1939[yyj1939] - yyv1941.CodecDecodeSelf(d) + yyv1870 := &yyv1868[yyj1868] + yyv1870.CodecDecodeSelf(d) } } } } else { - yyj1939 := 0 - for ; !r.CheckBreak(); yyj1939++ { + yyj1868 := 0 + for ; !r.CheckBreak(); yyj1868++ { - if yyj1939 >= len(yyv1939) { - yyv1939 = append(yyv1939, IDRange{}) // var yyz1939 IDRange - yyc1939 = true + if yyj1868 >= len(yyv1868) { + yyv1868 = append(yyv1868, HostPortRange{}) // var yyz1868 HostPortRange + yyc1868 = true } - yyh1939.ElemContainerState(yyj1939) - if yyj1939 < len(yyv1939) { + yyh1868.ElemContainerState(yyj1868) + if yyj1868 < len(yyv1868) { if r.TryDecodeAsNil() { - yyv1939[yyj1939] = IDRange{} + yyv1868[yyj1868] = HostPortRange{} } else { - yyv1942 := &yyv1939[yyj1939] - yyv1942.CodecDecodeSelf(d) + yyv1871 := &yyv1868[yyj1868] + yyv1871.CodecDecodeSelf(d) } } else { @@ -24054,115 +23226,115 @@ func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { } } - if yyj1939 < len(yyv1939) { - yyv1939 = yyv1939[:yyj1939] - yyc1939 = true - } else if yyj1939 == 0 && yyv1939 == nil { - yyv1939 = []IDRange{} - yyc1939 = true + if yyj1868 < len(yyv1868) { + yyv1868 = yyv1868[:yyj1868] + yyc1868 = true + } else if yyj1868 == 0 && yyv1868 == nil { + yyv1868 = []HostPortRange{} + yyc1868 = true } } - yyh1939.End() - if yyc1939 { - *v = yyv1939 + yyh1868.End() + if yyc1868 { + *v = yyv1868 } } -func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceIDRange(v []IDRange, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1943 := range v { + for _, yyv1872 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1944 := &yyv1943 - yy1944.CodecEncodeSelf(e) + yy1873 := &yyv1872 + yy1873.CodecEncodeSelf(e) } 
z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceIDRange(v *[]IDRange, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1945 := *v - yyh1945, yyl1945 := z.DecSliceHelperStart() - var yyc1945 bool - if yyl1945 == 0 { - if yyv1945 == nil { - yyv1945 = []PodSecurityPolicy{} - yyc1945 = true - } else if len(yyv1945) != 0 { - yyv1945 = yyv1945[:0] - yyc1945 = true + yyv1874 := *v + yyh1874, yyl1874 := z.DecSliceHelperStart() + var yyc1874 bool + if yyl1874 == 0 { + if yyv1874 == nil { + yyv1874 = []IDRange{} + yyc1874 = true + } else if len(yyv1874) != 0 { + yyv1874 = yyv1874[:0] + yyc1874 = true } - } else if yyl1945 > 0 { - var yyrr1945, yyrl1945 int - var yyrt1945 bool - if yyl1945 > cap(yyv1945) { + } else if yyl1874 > 0 { + var yyrr1874, yyrl1874 int + var yyrt1874 bool + if yyl1874 > cap(yyv1874) { - yyrg1945 := len(yyv1945) > 0 - yyv21945 := yyv1945 - yyrl1945, yyrt1945 = z.DecInferLen(yyl1945, z.DecBasicHandle().MaxInitLen, 552) - if yyrt1945 { - if yyrl1945 <= cap(yyv1945) { - yyv1945 = yyv1945[:yyrl1945] + yyrg1874 := len(yyv1874) > 0 + yyv21874 := yyv1874 + yyrl1874, yyrt1874 = z.DecInferLen(yyl1874, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1874 { + if yyrl1874 <= cap(yyv1874) { + yyv1874 = yyv1874[:yyrl1874] } else { - yyv1945 = make([]PodSecurityPolicy, yyrl1945) + yyv1874 = make([]IDRange, yyrl1874) } } else { - yyv1945 = make([]PodSecurityPolicy, yyrl1945) + yyv1874 = make([]IDRange, yyrl1874) } - yyc1945 = true - yyrr1945 = len(yyv1945) - if yyrg1945 { - copy(yyv1945, yyv21945) + yyc1874 = true + yyrr1874 = len(yyv1874) + if yyrg1874 { + copy(yyv1874, yyv21874) } - } else if yyl1945 != len(yyv1945) { - yyv1945 = yyv1945[:yyl1945] - yyc1945 = true + } else if yyl1874 != len(yyv1874) { + yyv1874 = yyv1874[:yyl1874] + yyc1874 = true } - yyj1945 := 0 - for ; yyj1945 < yyrr1945; yyj1945++ { - yyh1945.ElemContainerState(yyj1945) + yyj1874 := 0 + for ; yyj1874 < yyrr1874; yyj1874++ { + yyh1874.ElemContainerState(yyj1874) if r.TryDecodeAsNil() { - yyv1945[yyj1945] = PodSecurityPolicy{} + yyv1874[yyj1874] = IDRange{} } else { - yyv1946 := &yyv1945[yyj1945] - yyv1946.CodecDecodeSelf(d) + yyv1875 := &yyv1874[yyj1874] + yyv1875.CodecDecodeSelf(d) } } - if yyrt1945 { - for ; yyj1945 < yyl1945; yyj1945++ { - yyv1945 = append(yyv1945, PodSecurityPolicy{}) - yyh1945.ElemContainerState(yyj1945) + if yyrt1874 { + for ; yyj1874 < yyl1874; yyj1874++ { + yyv1874 = append(yyv1874, IDRange{}) + yyh1874.ElemContainerState(yyj1874) if r.TryDecodeAsNil() { - yyv1945[yyj1945] = PodSecurityPolicy{} + yyv1874[yyj1874] = IDRange{} } else { - yyv1947 := &yyv1945[yyj1945] - yyv1947.CodecDecodeSelf(d) + yyv1876 := &yyv1874[yyj1874] + yyv1876.CodecDecodeSelf(d) } } } } else { - yyj1945 := 0 - for ; !r.CheckBreak(); yyj1945++ { + yyj1874 := 0 + for ; !r.CheckBreak(); yyj1874++ { - if yyj1945 >= len(yyv1945) { - yyv1945 = append(yyv1945, PodSecurityPolicy{}) // var yyz1945 PodSecurityPolicy - yyc1945 = true + if yyj1874 >= len(yyv1874) { + yyv1874 = append(yyv1874, IDRange{}) // var yyz1874 IDRange + yyc1874 = true } - yyh1945.ElemContainerState(yyj1945) - if yyj1945 < len(yyv1945) { + yyh1874.ElemContainerState(yyj1874) + if yyj1874 < len(yyv1874) { if r.TryDecodeAsNil() { - yyv1945[yyj1945] = PodSecurityPolicy{} + yyv1874[yyj1874] = IDRange{} } else { - yyv1948 := &yyv1945[yyj1945] - 
yyv1948.CodecDecodeSelf(d) + yyv1877 := &yyv1874[yyj1874] + yyv1877.CodecDecodeSelf(d) } } else { @@ -24170,115 +23342,115 @@ func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *co } } - if yyj1945 < len(yyv1945) { - yyv1945 = yyv1945[:yyj1945] - yyc1945 = true - } else if yyj1945 == 0 && yyv1945 == nil { - yyv1945 = []PodSecurityPolicy{} - yyc1945 = true + if yyj1874 < len(yyv1874) { + yyv1874 = yyv1874[:yyj1874] + yyc1874 = true + } else if yyj1874 == 0 && yyv1874 == nil { + yyv1874 = []IDRange{} + yyc1874 = true } } - yyh1945.End() - if yyc1945 { - *v = yyv1945 + yyh1874.End() + if yyc1874 { + *v = yyv1874 } } -func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngressRule, e *codec1978.Encoder) { +func (x codecSelfer1234) encSlicePodSecurityPolicy(v []PodSecurityPolicy, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1949 := range v { + for _, yyv1878 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1950 := &yyv1949 - yy1950.CodecEncodeSelf(e) + yy1879 := &yyv1878 + yy1879.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) { +func (x codecSelfer1234) decSlicePodSecurityPolicy(v *[]PodSecurityPolicy, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1951 := *v - yyh1951, yyl1951 := z.DecSliceHelperStart() - var yyc1951 bool - if yyl1951 == 0 { - if yyv1951 == nil { - yyv1951 = []NetworkPolicyIngressRule{} - yyc1951 = true - } else if len(yyv1951) != 0 { - yyv1951 = yyv1951[:0] - yyc1951 = true + yyv1880 := *v + yyh1880, yyl1880 := z.DecSliceHelperStart() + var yyc1880 bool + if yyl1880 == 0 { + if yyv1880 == nil { + yyv1880 = []PodSecurityPolicy{} + yyc1880 = true + } else if len(yyv1880) != 0 { + yyv1880 = yyv1880[:0] + yyc1880 = true } - } else if yyl1951 > 0 { - var yyrr1951, yyrl1951 int - var yyrt1951 bool - if yyl1951 > cap(yyv1951) { + } else if yyl1880 > 0 { + var yyrr1880, yyrl1880 int + var yyrt1880 bool + if yyl1880 > cap(yyv1880) { - yyrg1951 := len(yyv1951) > 0 - yyv21951 := yyv1951 - yyrl1951, yyrt1951 = z.DecInferLen(yyl1951, z.DecBasicHandle().MaxInitLen, 48) - if yyrt1951 { - if yyrl1951 <= cap(yyv1951) { - yyv1951 = yyv1951[:yyrl1951] + yyrg1880 := len(yyv1880) > 0 + yyv21880 := yyv1880 + yyrl1880, yyrt1880 = z.DecInferLen(yyl1880, z.DecBasicHandle().MaxInitLen, 552) + if yyrt1880 { + if yyrl1880 <= cap(yyv1880) { + yyv1880 = yyv1880[:yyrl1880] } else { - yyv1951 = make([]NetworkPolicyIngressRule, yyrl1951) + yyv1880 = make([]PodSecurityPolicy, yyrl1880) } } else { - yyv1951 = make([]NetworkPolicyIngressRule, yyrl1951) + yyv1880 = make([]PodSecurityPolicy, yyrl1880) } - yyc1951 = true - yyrr1951 = len(yyv1951) - if yyrg1951 { - copy(yyv1951, yyv21951) + yyc1880 = true + yyrr1880 = len(yyv1880) + if yyrg1880 { + copy(yyv1880, yyv21880) } - } else if yyl1951 != len(yyv1951) { - yyv1951 = yyv1951[:yyl1951] - yyc1951 = true + } else if yyl1880 != len(yyv1880) { + yyv1880 = yyv1880[:yyl1880] + yyc1880 = true } - yyj1951 := 0 - for ; yyj1951 < yyrr1951; yyj1951++ { - yyh1951.ElemContainerState(yyj1951) + yyj1880 := 0 + for ; yyj1880 < yyrr1880; yyj1880++ { + yyh1880.ElemContainerState(yyj1880) if r.TryDecodeAsNil() { - yyv1951[yyj1951] = NetworkPolicyIngressRule{} + yyv1880[yyj1880] = PodSecurityPolicy{} } else 
{ - yyv1952 := &yyv1951[yyj1951] - yyv1952.CodecDecodeSelf(d) + yyv1881 := &yyv1880[yyj1880] + yyv1881.CodecDecodeSelf(d) } } - if yyrt1951 { - for ; yyj1951 < yyl1951; yyj1951++ { - yyv1951 = append(yyv1951, NetworkPolicyIngressRule{}) - yyh1951.ElemContainerState(yyj1951) + if yyrt1880 { + for ; yyj1880 < yyl1880; yyj1880++ { + yyv1880 = append(yyv1880, PodSecurityPolicy{}) + yyh1880.ElemContainerState(yyj1880) if r.TryDecodeAsNil() { - yyv1951[yyj1951] = NetworkPolicyIngressRule{} + yyv1880[yyj1880] = PodSecurityPolicy{} } else { - yyv1953 := &yyv1951[yyj1951] - yyv1953.CodecDecodeSelf(d) + yyv1882 := &yyv1880[yyj1880] + yyv1882.CodecDecodeSelf(d) } } } } else { - yyj1951 := 0 - for ; !r.CheckBreak(); yyj1951++ { + yyj1880 := 0 + for ; !r.CheckBreak(); yyj1880++ { - if yyj1951 >= len(yyv1951) { - yyv1951 = append(yyv1951, NetworkPolicyIngressRule{}) // var yyz1951 NetworkPolicyIngressRule - yyc1951 = true + if yyj1880 >= len(yyv1880) { + yyv1880 = append(yyv1880, PodSecurityPolicy{}) // var yyz1880 PodSecurityPolicy + yyc1880 = true } - yyh1951.ElemContainerState(yyj1951) - if yyj1951 < len(yyv1951) { + yyh1880.ElemContainerState(yyj1880) + if yyj1880 < len(yyv1880) { if r.TryDecodeAsNil() { - yyv1951[yyj1951] = NetworkPolicyIngressRule{} + yyv1880[yyj1880] = PodSecurityPolicy{} } else { - yyv1954 := &yyv1951[yyj1951] - yyv1954.CodecDecodeSelf(d) + yyv1883 := &yyv1880[yyj1880] + yyv1883.CodecDecodeSelf(d) } } else { @@ -24286,115 +23458,115 @@ func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngr } } - if yyj1951 < len(yyv1951) { - yyv1951 = yyv1951[:yyj1951] - yyc1951 = true - } else if yyj1951 == 0 && yyv1951 == nil { - yyv1951 = []NetworkPolicyIngressRule{} - yyc1951 = true + if yyj1880 < len(yyv1880) { + yyv1880 = yyv1880[:yyj1880] + yyc1880 = true + } else if yyj1880 == 0 && yyv1880 == nil { + yyv1880 = []PodSecurityPolicy{} + yyc1880 = true } } - yyh1951.End() - if yyc1951 { - *v = yyv1951 + yyh1880.End() + if yyc1880 { + *v = yyv1880 } } -func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicyIngressRule(v []NetworkPolicyIngressRule, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1955 := range v { + for _, yyv1884 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1956 := &yyv1955 - yy1956.CodecEncodeSelf(e) + yy1885 := &yyv1884 + yy1885.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicyIngressRule(v *[]NetworkPolicyIngressRule, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1957 := *v - yyh1957, yyl1957 := z.DecSliceHelperStart() - var yyc1957 bool - if yyl1957 == 0 { - if yyv1957 == nil { - yyv1957 = []NetworkPolicyPort{} - yyc1957 = true - } else if len(yyv1957) != 0 { - yyv1957 = yyv1957[:0] - yyc1957 = true + yyv1886 := *v + yyh1886, yyl1886 := z.DecSliceHelperStart() + var yyc1886 bool + if yyl1886 == 0 { + if yyv1886 == nil { + yyv1886 = []NetworkPolicyIngressRule{} + yyc1886 = true + } else if len(yyv1886) != 0 { + yyv1886 = yyv1886[:0] + yyc1886 = true } - } else if yyl1957 > 0 { - var yyrr1957, yyrl1957 int - var yyrt1957 bool - if yyl1957 > cap(yyv1957) { + } else if yyl1886 > 0 { + var yyrr1886, 
yyrl1886 int + var yyrt1886 bool + if yyl1886 > cap(yyv1886) { - yyrg1957 := len(yyv1957) > 0 - yyv21957 := yyv1957 - yyrl1957, yyrt1957 = z.DecInferLen(yyl1957, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1957 { - if yyrl1957 <= cap(yyv1957) { - yyv1957 = yyv1957[:yyrl1957] + yyrg1886 := len(yyv1886) > 0 + yyv21886 := yyv1886 + yyrl1886, yyrt1886 = z.DecInferLen(yyl1886, z.DecBasicHandle().MaxInitLen, 48) + if yyrt1886 { + if yyrl1886 <= cap(yyv1886) { + yyv1886 = yyv1886[:yyrl1886] } else { - yyv1957 = make([]NetworkPolicyPort, yyrl1957) + yyv1886 = make([]NetworkPolicyIngressRule, yyrl1886) } } else { - yyv1957 = make([]NetworkPolicyPort, yyrl1957) + yyv1886 = make([]NetworkPolicyIngressRule, yyrl1886) } - yyc1957 = true - yyrr1957 = len(yyv1957) - if yyrg1957 { - copy(yyv1957, yyv21957) + yyc1886 = true + yyrr1886 = len(yyv1886) + if yyrg1886 { + copy(yyv1886, yyv21886) } - } else if yyl1957 != len(yyv1957) { - yyv1957 = yyv1957[:yyl1957] - yyc1957 = true + } else if yyl1886 != len(yyv1886) { + yyv1886 = yyv1886[:yyl1886] + yyc1886 = true } - yyj1957 := 0 - for ; yyj1957 < yyrr1957; yyj1957++ { - yyh1957.ElemContainerState(yyj1957) + yyj1886 := 0 + for ; yyj1886 < yyrr1886; yyj1886++ { + yyh1886.ElemContainerState(yyj1886) if r.TryDecodeAsNil() { - yyv1957[yyj1957] = NetworkPolicyPort{} + yyv1886[yyj1886] = NetworkPolicyIngressRule{} } else { - yyv1958 := &yyv1957[yyj1957] - yyv1958.CodecDecodeSelf(d) + yyv1887 := &yyv1886[yyj1886] + yyv1887.CodecDecodeSelf(d) } } - if yyrt1957 { - for ; yyj1957 < yyl1957; yyj1957++ { - yyv1957 = append(yyv1957, NetworkPolicyPort{}) - yyh1957.ElemContainerState(yyj1957) + if yyrt1886 { + for ; yyj1886 < yyl1886; yyj1886++ { + yyv1886 = append(yyv1886, NetworkPolicyIngressRule{}) + yyh1886.ElemContainerState(yyj1886) if r.TryDecodeAsNil() { - yyv1957[yyj1957] = NetworkPolicyPort{} + yyv1886[yyj1886] = NetworkPolicyIngressRule{} } else { - yyv1959 := &yyv1957[yyj1957] - yyv1959.CodecDecodeSelf(d) + yyv1888 := &yyv1886[yyj1886] + yyv1888.CodecDecodeSelf(d) } } } } else { - yyj1957 := 0 - for ; !r.CheckBreak(); yyj1957++ { + yyj1886 := 0 + for ; !r.CheckBreak(); yyj1886++ { - if yyj1957 >= len(yyv1957) { - yyv1957 = append(yyv1957, NetworkPolicyPort{}) // var yyz1957 NetworkPolicyPort - yyc1957 = true + if yyj1886 >= len(yyv1886) { + yyv1886 = append(yyv1886, NetworkPolicyIngressRule{}) // var yyz1886 NetworkPolicyIngressRule + yyc1886 = true } - yyh1957.ElemContainerState(yyj1957) - if yyj1957 < len(yyv1957) { + yyh1886.ElemContainerState(yyj1886) + if yyj1886 < len(yyv1886) { if r.TryDecodeAsNil() { - yyv1957[yyj1957] = NetworkPolicyPort{} + yyv1886[yyj1886] = NetworkPolicyIngressRule{} } else { - yyv1960 := &yyv1957[yyj1957] - yyv1960.CodecDecodeSelf(d) + yyv1889 := &yyv1886[yyj1886] + yyv1889.CodecDecodeSelf(d) } } else { @@ -24402,115 +23574,115 @@ func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *co } } - if yyj1957 < len(yyv1957) { - yyv1957 = yyv1957[:yyj1957] - yyc1957 = true - } else if yyj1957 == 0 && yyv1957 == nil { - yyv1957 = []NetworkPolicyPort{} - yyc1957 = true + if yyj1886 < len(yyv1886) { + yyv1886 = yyv1886[:yyj1886] + yyc1886 = true + } else if yyj1886 == 0 && yyv1886 == nil { + yyv1886 = []NetworkPolicyIngressRule{} + yyc1886 = true } } - yyh1957.End() - if yyc1957 { - *v = yyv1957 + yyh1886.End() + if yyc1886 { + *v = yyv1886 } } -func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicyPort(v []NetworkPolicyPort, e 
*codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1961 := range v { + for _, yyv1890 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1962 := &yyv1961 - yy1962.CodecEncodeSelf(e) + yy1891 := &yyv1890 + yy1891.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicyPort(v *[]NetworkPolicyPort, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1963 := *v - yyh1963, yyl1963 := z.DecSliceHelperStart() - var yyc1963 bool - if yyl1963 == 0 { - if yyv1963 == nil { - yyv1963 = []NetworkPolicyPeer{} - yyc1963 = true - } else if len(yyv1963) != 0 { - yyv1963 = yyv1963[:0] - yyc1963 = true + yyv1892 := *v + yyh1892, yyl1892 := z.DecSliceHelperStart() + var yyc1892 bool + if yyl1892 == 0 { + if yyv1892 == nil { + yyv1892 = []NetworkPolicyPort{} + yyc1892 = true + } else if len(yyv1892) != 0 { + yyv1892 = yyv1892[:0] + yyc1892 = true } - } else if yyl1963 > 0 { - var yyrr1963, yyrl1963 int - var yyrt1963 bool - if yyl1963 > cap(yyv1963) { + } else if yyl1892 > 0 { + var yyrr1892, yyrl1892 int + var yyrt1892 bool + if yyl1892 > cap(yyv1892) { - yyrg1963 := len(yyv1963) > 0 - yyv21963 := yyv1963 - yyrl1963, yyrt1963 = z.DecInferLen(yyl1963, z.DecBasicHandle().MaxInitLen, 16) - if yyrt1963 { - if yyrl1963 <= cap(yyv1963) { - yyv1963 = yyv1963[:yyrl1963] + yyrg1892 := len(yyv1892) > 0 + yyv21892 := yyv1892 + yyrl1892, yyrt1892 = z.DecInferLen(yyl1892, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1892 { + if yyrl1892 <= cap(yyv1892) { + yyv1892 = yyv1892[:yyrl1892] } else { - yyv1963 = make([]NetworkPolicyPeer, yyrl1963) + yyv1892 = make([]NetworkPolicyPort, yyrl1892) } } else { - yyv1963 = make([]NetworkPolicyPeer, yyrl1963) + yyv1892 = make([]NetworkPolicyPort, yyrl1892) } - yyc1963 = true - yyrr1963 = len(yyv1963) - if yyrg1963 { - copy(yyv1963, yyv21963) + yyc1892 = true + yyrr1892 = len(yyv1892) + if yyrg1892 { + copy(yyv1892, yyv21892) } - } else if yyl1963 != len(yyv1963) { - yyv1963 = yyv1963[:yyl1963] - yyc1963 = true + } else if yyl1892 != len(yyv1892) { + yyv1892 = yyv1892[:yyl1892] + yyc1892 = true } - yyj1963 := 0 - for ; yyj1963 < yyrr1963; yyj1963++ { - yyh1963.ElemContainerState(yyj1963) + yyj1892 := 0 + for ; yyj1892 < yyrr1892; yyj1892++ { + yyh1892.ElemContainerState(yyj1892) if r.TryDecodeAsNil() { - yyv1963[yyj1963] = NetworkPolicyPeer{} + yyv1892[yyj1892] = NetworkPolicyPort{} } else { - yyv1964 := &yyv1963[yyj1963] - yyv1964.CodecDecodeSelf(d) + yyv1893 := &yyv1892[yyj1892] + yyv1893.CodecDecodeSelf(d) } } - if yyrt1963 { - for ; yyj1963 < yyl1963; yyj1963++ { - yyv1963 = append(yyv1963, NetworkPolicyPeer{}) - yyh1963.ElemContainerState(yyj1963) + if yyrt1892 { + for ; yyj1892 < yyl1892; yyj1892++ { + yyv1892 = append(yyv1892, NetworkPolicyPort{}) + yyh1892.ElemContainerState(yyj1892) if r.TryDecodeAsNil() { - yyv1963[yyj1963] = NetworkPolicyPeer{} + yyv1892[yyj1892] = NetworkPolicyPort{} } else { - yyv1965 := &yyv1963[yyj1963] - yyv1965.CodecDecodeSelf(d) + yyv1894 := &yyv1892[yyj1892] + yyv1894.CodecDecodeSelf(d) } } } } else { - yyj1963 := 0 - for ; !r.CheckBreak(); yyj1963++ { + yyj1892 := 0 + for ; !r.CheckBreak(); yyj1892++ { - if yyj1963 >= len(yyv1963) { - yyv1963 = append(yyv1963, NetworkPolicyPeer{}) // var yyz1963 NetworkPolicyPeer - 
yyc1963 = true + if yyj1892 >= len(yyv1892) { + yyv1892 = append(yyv1892, NetworkPolicyPort{}) // var yyz1892 NetworkPolicyPort + yyc1892 = true } - yyh1963.ElemContainerState(yyj1963) - if yyj1963 < len(yyv1963) { + yyh1892.ElemContainerState(yyj1892) + if yyj1892 < len(yyv1892) { if r.TryDecodeAsNil() { - yyv1963[yyj1963] = NetworkPolicyPeer{} + yyv1892[yyj1892] = NetworkPolicyPort{} } else { - yyv1966 := &yyv1963[yyj1963] - yyv1966.CodecDecodeSelf(d) + yyv1895 := &yyv1892[yyj1892] + yyv1895.CodecDecodeSelf(d) } } else { @@ -24518,115 +23690,115 @@ func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *co } } - if yyj1963 < len(yyv1963) { - yyv1963 = yyv1963[:yyj1963] - yyc1963 = true - } else if yyj1963 == 0 && yyv1963 == nil { - yyv1963 = []NetworkPolicyPeer{} - yyc1963 = true + if yyj1892 < len(yyv1892) { + yyv1892 = yyv1892[:yyj1892] + yyc1892 = true + } else if yyj1892 == 0 && yyv1892 == nil { + yyv1892 = []NetworkPolicyPort{} + yyc1892 = true } } - yyh1963.End() - if yyc1963 { - *v = yyv1963 + yyh1892.End() + if yyc1892 { + *v = yyv1892 } } -func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicyPeer(v []NetworkPolicyPeer, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1967 := range v { + for _, yyv1896 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1968 := &yyv1967 - yy1968.CodecEncodeSelf(e) + yy1897 := &yyv1896 + yy1897.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicyPeer(v *[]NetworkPolicyPeer, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1969 := *v - yyh1969, yyl1969 := z.DecSliceHelperStart() - var yyc1969 bool - if yyl1969 == 0 { - if yyv1969 == nil { - yyv1969 = []NetworkPolicy{} - yyc1969 = true - } else if len(yyv1969) != 0 { - yyv1969 = yyv1969[:0] - yyc1969 = true + yyv1898 := *v + yyh1898, yyl1898 := z.DecSliceHelperStart() + var yyc1898 bool + if yyl1898 == 0 { + if yyv1898 == nil { + yyv1898 = []NetworkPolicyPeer{} + yyc1898 = true + } else if len(yyv1898) != 0 { + yyv1898 = yyv1898[:0] + yyc1898 = true } - } else if yyl1969 > 0 { - var yyrr1969, yyrl1969 int - var yyrt1969 bool - if yyl1969 > cap(yyv1969) { + } else if yyl1898 > 0 { + var yyrr1898, yyrl1898 int + var yyrt1898 bool + if yyl1898 > cap(yyv1898) { - yyrg1969 := len(yyv1969) > 0 - yyv21969 := yyv1969 - yyrl1969, yyrt1969 = z.DecInferLen(yyl1969, z.DecBasicHandle().MaxInitLen, 312) - if yyrt1969 { - if yyrl1969 <= cap(yyv1969) { - yyv1969 = yyv1969[:yyrl1969] + yyrg1898 := len(yyv1898) > 0 + yyv21898 := yyv1898 + yyrl1898, yyrt1898 = z.DecInferLen(yyl1898, z.DecBasicHandle().MaxInitLen, 16) + if yyrt1898 { + if yyrl1898 <= cap(yyv1898) { + yyv1898 = yyv1898[:yyrl1898] } else { - yyv1969 = make([]NetworkPolicy, yyrl1969) + yyv1898 = make([]NetworkPolicyPeer, yyrl1898) } } else { - yyv1969 = make([]NetworkPolicy, yyrl1969) + yyv1898 = make([]NetworkPolicyPeer, yyrl1898) } - yyc1969 = true - yyrr1969 = len(yyv1969) - if yyrg1969 { - copy(yyv1969, yyv21969) + yyc1898 = true + yyrr1898 = len(yyv1898) + if yyrg1898 { + copy(yyv1898, yyv21898) } - } else if yyl1969 != len(yyv1969) { - yyv1969 = yyv1969[:yyl1969] - yyc1969 = true + } else if yyl1898 != 
len(yyv1898) { + yyv1898 = yyv1898[:yyl1898] + yyc1898 = true } - yyj1969 := 0 - for ; yyj1969 < yyrr1969; yyj1969++ { - yyh1969.ElemContainerState(yyj1969) + yyj1898 := 0 + for ; yyj1898 < yyrr1898; yyj1898++ { + yyh1898.ElemContainerState(yyj1898) if r.TryDecodeAsNil() { - yyv1969[yyj1969] = NetworkPolicy{} + yyv1898[yyj1898] = NetworkPolicyPeer{} } else { - yyv1970 := &yyv1969[yyj1969] - yyv1970.CodecDecodeSelf(d) + yyv1899 := &yyv1898[yyj1898] + yyv1899.CodecDecodeSelf(d) } } - if yyrt1969 { - for ; yyj1969 < yyl1969; yyj1969++ { - yyv1969 = append(yyv1969, NetworkPolicy{}) - yyh1969.ElemContainerState(yyj1969) + if yyrt1898 { + for ; yyj1898 < yyl1898; yyj1898++ { + yyv1898 = append(yyv1898, NetworkPolicyPeer{}) + yyh1898.ElemContainerState(yyj1898) if r.TryDecodeAsNil() { - yyv1969[yyj1969] = NetworkPolicy{} + yyv1898[yyj1898] = NetworkPolicyPeer{} } else { - yyv1971 := &yyv1969[yyj1969] - yyv1971.CodecDecodeSelf(d) + yyv1900 := &yyv1898[yyj1898] + yyv1900.CodecDecodeSelf(d) } } } } else { - yyj1969 := 0 - for ; !r.CheckBreak(); yyj1969++ { + yyj1898 := 0 + for ; !r.CheckBreak(); yyj1898++ { - if yyj1969 >= len(yyv1969) { - yyv1969 = append(yyv1969, NetworkPolicy{}) // var yyz1969 NetworkPolicy - yyc1969 = true + if yyj1898 >= len(yyv1898) { + yyv1898 = append(yyv1898, NetworkPolicyPeer{}) // var yyz1898 NetworkPolicyPeer + yyc1898 = true } - yyh1969.ElemContainerState(yyj1969) - if yyj1969 < len(yyv1969) { + yyh1898.ElemContainerState(yyj1898) + if yyj1898 < len(yyv1898) { if r.TryDecodeAsNil() { - yyv1969[yyj1969] = NetworkPolicy{} + yyv1898[yyj1898] = NetworkPolicyPeer{} } else { - yyv1972 := &yyv1969[yyj1969] - yyv1972.CodecDecodeSelf(d) + yyv1901 := &yyv1898[yyj1898] + yyv1901.CodecDecodeSelf(d) } } else { @@ -24634,115 +23806,115 @@ func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978. 
} } - if yyj1969 < len(yyv1969) { - yyv1969 = yyv1969[:yyj1969] - yyc1969 = true - } else if yyj1969 == 0 && yyv1969 == nil { - yyv1969 = []NetworkPolicy{} - yyc1969 = true + if yyj1898 < len(yyv1898) { + yyv1898 = yyv1898[:yyj1898] + yyc1898 = true + } else if yyj1898 == 0 && yyv1898 == nil { + yyv1898 = []NetworkPolicyPeer{} + yyc1898 = true } } - yyh1969.End() - if yyc1969 { - *v = yyv1969 + yyh1898.End() + if yyc1898 { + *v = yyv1898 } } -func (x codecSelfer1234) encSliceStorageClass(v []StorageClass, e *codec1978.Encoder) { +func (x codecSelfer1234) encSliceNetworkPolicy(v []NetworkPolicy, e *codec1978.Encoder) { var h codecSelfer1234 z, r := codec1978.GenHelperEncoder(e) _, _, _ = h, z, r r.EncodeArrayStart(len(v)) - for _, yyv1973 := range v { + for _, yyv1902 := range v { z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy1974 := &yyv1973 - yy1974.CodecEncodeSelf(e) + yy1903 := &yyv1902 + yy1903.CodecEncodeSelf(e) } z.EncSendContainerState(codecSelfer_containerArrayEnd1234) } -func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.Decoder) { +func (x codecSelfer1234) decSliceNetworkPolicy(v *[]NetworkPolicy, d *codec1978.Decoder) { var h codecSelfer1234 z, r := codec1978.GenHelperDecoder(d) _, _, _ = h, z, r - yyv1975 := *v - yyh1975, yyl1975 := z.DecSliceHelperStart() - var yyc1975 bool - if yyl1975 == 0 { - if yyv1975 == nil { - yyv1975 = []StorageClass{} - yyc1975 = true - } else if len(yyv1975) != 0 { - yyv1975 = yyv1975[:0] - yyc1975 = true + yyv1904 := *v + yyh1904, yyl1904 := z.DecSliceHelperStart() + var yyc1904 bool + if yyl1904 == 0 { + if yyv1904 == nil { + yyv1904 = []NetworkPolicy{} + yyc1904 = true + } else if len(yyv1904) != 0 { + yyv1904 = yyv1904[:0] + yyc1904 = true } - } else if yyl1975 > 0 { - var yyrr1975, yyrl1975 int - var yyrt1975 bool - if yyl1975 > cap(yyv1975) { + } else if yyl1904 > 0 { + var yyrr1904, yyrl1904 int + var yyrt1904 bool + if yyl1904 > cap(yyv1904) { - yyrg1975 := len(yyv1975) > 0 - yyv21975 := yyv1975 - yyrl1975, yyrt1975 = z.DecInferLen(yyl1975, z.DecBasicHandle().MaxInitLen, 280) - if yyrt1975 { - if yyrl1975 <= cap(yyv1975) { - yyv1975 = yyv1975[:yyrl1975] + yyrg1904 := len(yyv1904) > 0 + yyv21904 := yyv1904 + yyrl1904, yyrt1904 = z.DecInferLen(yyl1904, z.DecBasicHandle().MaxInitLen, 312) + if yyrt1904 { + if yyrl1904 <= cap(yyv1904) { + yyv1904 = yyv1904[:yyrl1904] } else { - yyv1975 = make([]StorageClass, yyrl1975) + yyv1904 = make([]NetworkPolicy, yyrl1904) } } else { - yyv1975 = make([]StorageClass, yyrl1975) + yyv1904 = make([]NetworkPolicy, yyrl1904) } - yyc1975 = true - yyrr1975 = len(yyv1975) - if yyrg1975 { - copy(yyv1975, yyv21975) + yyc1904 = true + yyrr1904 = len(yyv1904) + if yyrg1904 { + copy(yyv1904, yyv21904) } - } else if yyl1975 != len(yyv1975) { - yyv1975 = yyv1975[:yyl1975] - yyc1975 = true + } else if yyl1904 != len(yyv1904) { + yyv1904 = yyv1904[:yyl1904] + yyc1904 = true } - yyj1975 := 0 - for ; yyj1975 < yyrr1975; yyj1975++ { - yyh1975.ElemContainerState(yyj1975) + yyj1904 := 0 + for ; yyj1904 < yyrr1904; yyj1904++ { + yyh1904.ElemContainerState(yyj1904) if r.TryDecodeAsNil() { - yyv1975[yyj1975] = StorageClass{} + yyv1904[yyj1904] = NetworkPolicy{} } else { - yyv1976 := &yyv1975[yyj1975] - yyv1976.CodecDecodeSelf(d) + yyv1905 := &yyv1904[yyj1904] + yyv1905.CodecDecodeSelf(d) } } - if yyrt1975 { - for ; yyj1975 < yyl1975; yyj1975++ { - yyv1975 = append(yyv1975, StorageClass{}) - yyh1975.ElemContainerState(yyj1975) + if yyrt1904 { + for ; yyj1904 < yyl1904; yyj1904++ { + yyv1904 
= append(yyv1904, NetworkPolicy{}) + yyh1904.ElemContainerState(yyj1904) if r.TryDecodeAsNil() { - yyv1975[yyj1975] = StorageClass{} + yyv1904[yyj1904] = NetworkPolicy{} } else { - yyv1977 := &yyv1975[yyj1975] - yyv1977.CodecDecodeSelf(d) + yyv1906 := &yyv1904[yyj1904] + yyv1906.CodecDecodeSelf(d) } } } } else { - yyj1975 := 0 - for ; !r.CheckBreak(); yyj1975++ { + yyj1904 := 0 + for ; !r.CheckBreak(); yyj1904++ { - if yyj1975 >= len(yyv1975) { - yyv1975 = append(yyv1975, StorageClass{}) // var yyz1975 StorageClass - yyc1975 = true + if yyj1904 >= len(yyv1904) { + yyv1904 = append(yyv1904, NetworkPolicy{}) // var yyz1904 NetworkPolicy + yyc1904 = true } - yyh1975.ElemContainerState(yyj1975) - if yyj1975 < len(yyv1975) { + yyh1904.ElemContainerState(yyj1904) + if yyj1904 < len(yyv1904) { if r.TryDecodeAsNil() { - yyv1975[yyj1975] = StorageClass{} + yyv1904[yyj1904] = NetworkPolicy{} } else { - yyv1978 := &yyv1975[yyj1975] - yyv1978.CodecDecodeSelf(d) + yyv1907 := &yyv1904[yyj1904] + yyv1907.CodecDecodeSelf(d) } } else { @@ -24750,16 +23922,16 @@ func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.De } } - if yyj1975 < len(yyv1975) { - yyv1975 = yyv1975[:yyj1975] - yyc1975 = true - } else if yyj1975 == 0 && yyv1975 == nil { - yyv1975 = []StorageClass{} - yyc1975 = true + if yyj1904 < len(yyv1904) { + yyv1904 = yyv1904[:yyj1904] + yyc1904 = true + } else if yyj1904 == 0 && yyv1904 == nil { + yyv1904 = []NetworkPolicy{} + yyc1904 = true } } - yyh1975.End() - if yyc1975 { - *v = yyv1975 + yyh1904.End() + if yyc1904 { + *v = yyv1904 } } diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/types.go b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/types.go index 151703f5f75b..64f12c786fb9 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/types.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/types.go @@ -34,7 +34,7 @@ type ScaleStatus struct { // actual number of observed instances of the scaled object. Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - // label query over pods that should match the replicas count. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // label query over pods that should match the replicas count. More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` // label selector for pods that should match the replicas count. This is a serializated @@ -42,7 +42,7 @@ type ScaleStatus struct { // avoid introspection in the clients. The string will be in the same format as the // query-param syntax. If the target type only supports map-based selectors, both this // field and map-based selector field are populated. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } @@ -52,13 +52,13 @@ type ScaleStatus struct { // represents a scaling request for a resource. type Scale struct { unversioned.TypeMeta `json:",inline"` - // Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata. + // Standard object metadata; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata. 
v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Read-only. Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -69,9 +69,9 @@ type ReplicationControllerDummy struct { // SubresourceReference contains enough information to let you inspect or modify the referred subresource. type SubresourceReference struct { - // Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // Kind of the referent; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names + // Name of the referent; More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"` // API version of the referent APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` @@ -145,10 +145,10 @@ type HorizontalPodAutoscalerStatus struct { // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { unversioned.TypeMeta `json:",inline"` - // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // current information about the autoscaler. @@ -410,14 +410,14 @@ type DaemonSetSpec struct { // Selector is a label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` // Template is the object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` // TODO(madhusudancs): Uncomment while implementing DaemonSet updates. @@ -447,17 +447,17 @@ const ( type DaemonSetStatus struct { // CurrentNumberScheduled is the number of nodes that are running at least 1 // daemon pod and are supposed to run the daemon pod. - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: http://releases.k8s.io/release-1.4/docs/admin/daemons.md CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` // NumberMisscheduled is the number of nodes that are running the daemon pod, but are // not supposed to run the daemon pod. - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: http://releases.k8s.io/release-1.4/docs/admin/daemons.md NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` // DesiredNumberScheduled is the total number of nodes that should be running the daemon // pod (including nodes correctly running the daemon pod). - // More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md + // More info: http://releases.k8s.io/release-1.4/docs/admin/daemons.md DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` } @@ -467,18 +467,18 @@ type DaemonSetStatus struct { type DaemonSet struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired behavior of this daemon set. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the current status of this daemon set. This data may be // out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -486,7 +486,7 @@ type DaemonSet struct { type DaemonSetList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of daemon sets. 
@@ -497,7 +497,7 @@ type DaemonSetList struct { type ThirdPartyResourceDataList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of ThirdpartyResourceData. @@ -510,15 +510,15 @@ type ThirdPartyResourceDataList struct { type Job struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is a structure defining the expected behavior of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is a structure describing current status of a job. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -526,7 +526,7 @@ type Job struct { type JobList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Job. @@ -540,7 +540,7 @@ type JobSpec struct { // run at any given time. The actual number of pods running in steady state will // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), // i.e. when the work left to do is less than max parallelism. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` // Completions specifies the desired number of successfully finished pods the @@ -548,7 +548,7 @@ type JobSpec struct { // pod signals the success of all pods, and allows parallelism to have any positive // value. Setting to 1 means that parallelism is limited to 1 and the success of that // pod signals the success of the job. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` // Optional duration in seconds relative to the startTime that the job may be active @@ -557,19 +557,19 @@ type JobSpec struct { // Selector is a label query over pods that should match the pod count. // Normally, the system sets this field for you. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` // AutoSelector controls generation of pod labels and pod selectors. // It was not present in the original extensions/v1beta1 Job definition, but exists // to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite // meaning as, ManualSelector. - // More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md + // More info: http://releases.k8s.io/release-1.4/docs/design/selector-generation.md AutoSelector *bool `json:"autoSelector,omitempty" protobuf:"varint,5,opt,name=autoSelector"` // Template is the object that describes the pod that will be created when // executing a job. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` } @@ -577,7 +577,7 @@ type JobSpec struct { type JobStatus struct { // Conditions represent the latest available observations of an object's current state. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` // StartTime represents time when the job was acknowledged by the Job Manager. @@ -635,15 +635,15 @@ type JobCondition struct { type Ingress struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is the desired state of the Ingress. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the current state of the Ingress. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -651,7 +651,7 @@ type Ingress struct { type IngressList struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is the list of Ingress. @@ -852,18 +852,18 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // Standard object's metadata. 
More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the most recently observed status of the ReplicaSet. // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -871,11 +871,11 @@ type ReplicaSet struct { type ReplicaSetList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ReplicaSets. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -884,25 +884,25 @@ type ReplicaSetSpec struct { // Replicas is the number of desired replicas. // This is a pointer to distinguish between explicit zero and unspecified. // Defaults to 1. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` // Selector is a label query over pods that should match the replica count. // If the selector is empty, it is defaulted to the labels present on the pod template. // Label keys and values that must match in order to be controlled by this replica set. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors Selector *LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // Template is the object that describes the pod that will be created if // insufficient replicas are detected. - // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` } // ReplicaSetStatus represents the current status of a ReplicaSet. type ReplicaSetStatus struct { // Replicas is the most recently oberved number of replicas. 
- // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller + // More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` // The number of pods that have labels matching the labels of the pod template of the replicaset. @@ -923,7 +923,7 @@ type ReplicaSetStatus struct { type PodSecurityPolicy struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec defines the policy enforced. @@ -1014,7 +1014,7 @@ type SELinuxStrategyOptions struct { // type is the strategy that will dictate the allowable labels that may be set. Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` // seLinuxOptions required to run as; required for MustRunAs - // More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context + // More info: http://releases.k8s.io/release-1.4/docs/design/security_context.md#security-context SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` } @@ -1102,7 +1102,7 @@ const ( type PodSecurityPolicyList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of schema objects. @@ -1112,7 +1112,7 @@ type PodSecurityPolicyList struct { type NetworkPolicy struct { unversioned.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior for this NetworkPolicy. @@ -1193,42 +1193,9 @@ type NetworkPolicyPeer struct { type NetworkPolicyList struct { unversioned.TypeMeta `json:",inline"` // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + // More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of schema objects. Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } - -// +genclient=true -// +nonNamespaced=true - -// StorageClass describes the parameters for a class of storage for -// which PersistentVolumes can be dynamically provisioned. -// -// StorageClasses are non-namespaced; the name of the storage class -// according to etcd is in ObjectMeta.Name. -type StorageClass struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Provisioner indicates the type of the provisioner. 
- Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - - // Parameters holds the parameters for the provisioner that should - // create volumes of this storage class. - Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` -} - -// StorageClassList is a collection of storage classes. -type StorageClassList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of StorageClasses - Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go index f3918db62186..572b3d7370f6 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/types_swagger_doc_generated.go @@ -65,9 +65,9 @@ func (CustomMetricTarget) SwaggerDoc() map[string]string { var map_DaemonSet = map[string]string{ "": "DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired behavior of this daemon set. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -76,7 +76,7 @@ func (DaemonSet) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "items": "Items is a list of daemon sets.", } @@ -86,8 +86,8 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", - "selector": "Selector is a label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", - "template": "Template is the object that describes the pod that will be created. 
The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template", + "selector": "Selector is a label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors", + "template": "Template is the object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template", } func (DaemonSetSpec) SwaggerDoc() map[string]string { @@ -96,9 +96,9 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { var map_DaemonSetStatus = map[string]string{ "": "DaemonSetStatus represents the current status of a daemon set.", - "currentNumberScheduled": "CurrentNumberScheduled is the number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", - "numberMisscheduled": "NumberMisscheduled is the number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", - "desiredNumberScheduled": "DesiredNumberScheduled is the total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: http://releases.k8s.io/HEAD/docs/admin/daemons.md", + "currentNumberScheduled": "CurrentNumberScheduled is the number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: http://releases.k8s.io/release-1.4/docs/admin/daemons.md", + "numberMisscheduled": "NumberMisscheduled is the number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: http://releases.k8s.io/release-1.4/docs/admin/daemons.md", + "desiredNumberScheduled": "DesiredNumberScheduled is the total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: http://releases.k8s.io/release-1.4/docs/admin/daemons.md", } func (DaemonSetStatus) SwaggerDoc() map[string]string { @@ -217,8 +217,8 @@ func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "metadata": "Standard object metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status.", "status": "current information about the autoscaler.", } @@ -283,9 +283,9 @@ func (IDRange) SwaggerDoc() map[string]string { var map_Ingress = map[string]string{ "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. 
An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -304,7 +304,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "items": "Items is the list of Ingress.", } @@ -361,9 +361,9 @@ func (IngressTLS) SwaggerDoc() map[string]string { var map_Job = map[string]string{ "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec is a structure defining the expected behavior of a job. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is a structure describing current status of a job. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (Job) SwaggerDoc() map[string]string { @@ -386,7 +386,7 @@ func (JobCondition) SwaggerDoc() map[string]string { var map_JobList = map[string]string{ "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "items": "Items is the list of Job.", } @@ -396,12 +396,12 @@ func (JobList) SwaggerDoc() map[string]string { var map_JobSpec = map[string]string{ "": "JobSpec describes how the job execution will look like.", - "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", - "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", + "parallelism": "Parallelism specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md", + "completions": "Completions specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md", "activeDeadlineSeconds": "Optional duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", - "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", - "autoSelector": "AutoSelector controls generation of pod labels and pod selectors. It was not present in the original extensions/v1beta1 Job definition, but exists to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite meaning as, ManualSelector. More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md", - "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", + "selector": "Selector is a label query over pods that should match the pod count. Normally, the system sets this field for you. More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors", + "autoSelector": "AutoSelector controls generation of pod labels and pod selectors. It was not present in the original extensions/v1beta1 Job definition, but exists to allow conversion from batch/v1 Jobs, where it corresponds to, but has the opposite meaning as, ManualSelector. More info: http://releases.k8s.io/release-1.4/docs/design/selector-generation.md", + "template": "Template is the object that describes the pod that will be created when executing a job. More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md", } func (JobSpec) SwaggerDoc() map[string]string { @@ -410,7 +410,7 @@ func (JobSpec) SwaggerDoc() map[string]string { var map_JobStatus = map[string]string{ "": "JobStatus represents the current state of a Job.", - "conditions": "Conditions represent the latest available observations of an object's current state. More info: http://releases.k8s.io/HEAD/docs/user-guide/jobs.md", + "conditions": "Conditions represent the latest available observations of an object's current state. 
More info: http://releases.k8s.io/release-1.4/docs/user-guide/jobs.md", "startTime": "StartTime represents time when the job was acknowledged by the Job Manager. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", "completionTime": "CompletionTime represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", "active": "Active is the number of actively running pods.", @@ -457,7 +457,7 @@ func (ListOptions) SwaggerDoc() map[string]string { } var map_NetworkPolicy = map[string]string{ - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", } @@ -477,7 +477,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "Network Policy List is a list of NetworkPolicy objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -514,7 +514,7 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { var map_PodSecurityPolicy = map[string]string{ "": "Pod Security Policy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "spec": "spec defines the policy enforced.", } @@ -524,7 +524,7 @@ func (PodSecurityPolicy) SwaggerDoc() map[string]string { var map_PodSecurityPolicyList = map[string]string{ "": "Pod Security Policy List is a list of PodSecurityPolicy objects.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -556,9 +556,9 @@ func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "ReplicaSet represents the configuration of a ReplicaSet.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. 
Standard object's metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -567,8 +567,8 @@ func (ReplicaSet) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "List of ReplicaSets. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md", + "metadata": "Standard list metadata. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "items": "List of ReplicaSets. More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md", } func (ReplicaSetList) SwaggerDoc() map[string]string { @@ -577,9 +577,9 @@ func (ReplicaSetList) SwaggerDoc() map[string]string { var map_ReplicaSetSpec = map[string]string{ "": "ReplicaSetSpec is the specification of a ReplicaSet.", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller", - "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller", + "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#pod-template", } func (ReplicaSetSpec) SwaggerDoc() map[string]string { @@ -588,7 +588,7 @@ func (ReplicaSetSpec) SwaggerDoc() map[string]string { var map_ReplicaSetStatus = map[string]string{ "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently oberved number of replicas. 
More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#what-is-a-replication-controller", + "replicas": "Replicas is the most recently oberved number of replicas. More info: http://releases.k8s.io/release-1.4/docs/user-guide/replication-controller.md#what-is-a-replication-controller", "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", "readyReplicas": "The number of ready replicas for this replica set.", "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", @@ -637,7 +637,7 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { var map_SELinuxStrategyOptions = map[string]string{ "": "SELinux Strategy Options defines the strategy type and any options used to create the strategy.", "rule": "type is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: http://releases.k8s.io/HEAD/docs/design/security_context.md#security-context", + "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: http://releases.k8s.io/release-1.4/docs/design/security_context.md#security-context", } func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { @@ -646,9 +646,9 @@ func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { @@ -667,39 +667,18 @@ func (ScaleSpec) SwaggerDoc() map[string]string { var map_ScaleStatus = map[string]string{ "": "represents the current status of a scale subresource.", "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", - "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", + "selector": "label query over pods that should match the replicas count. More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors", + "targetSelector": "label selector for pods that should match the replicas count. 
This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: http://releases.k8s.io/release-1.4/docs/user-guide/labels.md#label-selectors", } func (ScaleStatus) SwaggerDoc() map[string]string { return map_ScaleStatus } -var map_StorageClass = map[string]string{ - "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", -} - -func (StorageClass) SwaggerDoc() map[string]string { - return map_StorageClass -} - -var map_StorageClassList = map[string]string{ - "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", -} - -func (StorageClassList) SwaggerDoc() map[string]string { - return map_StorageClassList -} - var map_SubresourceReference = map[string]string{ "": "SubresourceReference contains enough information to let you inspect or modify the referred subresource.", - "kind": "Kind of the referent; More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "name": "Name of the referent; More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names", + "kind": "Kind of the referent; More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#types-kinds", + "name": "Name of the referent; More info: http://releases.k8s.io/release-1.4/docs/user-guide/identifiers.md#names", "apiVersion": "API version of the referent", "subresource": "Subresource name of the referent", } @@ -741,7 +720,7 @@ func (ThirdPartyResourceData) SwaggerDoc() map[string]string { var map_ThirdPartyResourceDataList = map[string]string{ "": "ThirdPartyResrouceDataList is a list of ThirdPartyResourceData.", - "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: http://releases.k8s.io/release-1.4/docs/devel/api-conventions.md#metadata", "items": "Items is the list of ThirdpartyResourceData.", } diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/zz_generated.conversion.go index 23cd760e15fc..a6f0e4301ad4 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/zz_generated.conversion.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/zz_generated.conversion.go @@ -163,10 +163,6 @@ func RegisterConversions(scheme *runtime.Scheme) error { Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec, Convert_v1beta1_ScaleStatus_To_extensions_ScaleStatus, Convert_extensions_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_StorageClass_To_extensions_StorageClass, - Convert_extensions_StorageClass_To_v1beta1_StorageClass, - 
Convert_v1beta1_StorageClassList_To_extensions_StorageClassList, - Convert_extensions_StorageClassList_To_v1beta1_StorageClassList, Convert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions, Convert_extensions_SupplementalGroupsStrategyOptions_To_v1beta1_SupplementalGroupsStrategyOptions, Convert_v1beta1_ThirdPartyResource_To_extensions_ThirdPartyResource, @@ -2424,90 +2420,6 @@ func Convert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in *extensions.ScaleSpec, return autoConvert_extensions_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) } -func autoConvert_v1beta1_StorageClass_To_extensions_StorageClass(in *StorageClass, out *extensions.StorageClass, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - out.Provisioner = in.Provisioner - out.Parameters = in.Parameters - return nil -} - -func Convert_v1beta1_StorageClass_To_extensions_StorageClass(in *StorageClass, out *extensions.StorageClass, s conversion.Scope) error { - return autoConvert_v1beta1_StorageClass_To_extensions_StorageClass(in, out, s) -} - -func autoConvert_extensions_StorageClass_To_v1beta1_StorageClass(in *extensions.StorageClass, out *StorageClass, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - out.Provisioner = in.Provisioner - out.Parameters = in.Parameters - return nil -} - -func Convert_extensions_StorageClass_To_v1beta1_StorageClass(in *extensions.StorageClass, out *StorageClass, s conversion.Scope) error { - return autoConvert_extensions_StorageClass_To_v1beta1_StorageClass(in, out, s) -} - -func autoConvert_v1beta1_StorageClassList_To_extensions_StorageClassList(in *StorageClassList, out *extensions.StorageClassList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]extensions.StorageClass, len(*in)) - for i := range *in { - if err := Convert_v1beta1_StorageClass_To_extensions_StorageClass(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_StorageClassList_To_extensions_StorageClassList(in *StorageClassList, out *extensions.StorageClassList, s conversion.Scope) error { - return autoConvert_v1beta1_StorageClassList_To_extensions_StorageClassList(in, out, s) -} - -func autoConvert_extensions_StorageClassList_To_v1beta1_StorageClassList(in *extensions.StorageClassList, out *StorageClassList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StorageClass, len(*in)) - for i := range 
*in { - if err := Convert_extensions_StorageClass_To_v1beta1_StorageClass(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_extensions_StorageClassList_To_v1beta1_StorageClassList(in *extensions.StorageClassList, out *StorageClassList, s conversion.Scope) error { - return autoConvert_extensions_StorageClassList_To_v1beta1_StorageClassList(in, out, s) -} - func autoConvert_v1beta1_SupplementalGroupsStrategyOptions_To_extensions_SupplementalGroupsStrategyOptions(in *SupplementalGroupsStrategyOptions, out *extensions.SupplementalGroupsStrategyOptions, s conversion.Scope) error { out.Rule = extensions.SupplementalGroupsStrategyType(in.Rule) if in.Ranges != nil { diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go index ecad11e764e4..eae60652e09e 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/v1beta1/zz_generated.deepcopy.go @@ -100,8 +100,6 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Scale, InType: reflect.TypeOf(&Scale{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SubresourceReference, InType: reflect.TypeOf(&SubresourceReference{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_SupplementalGroupsStrategyOptions, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ThirdPartyResource, InType: reflect.TypeOf(&ThirdPartyResource{})}, @@ -1339,49 +1337,6 @@ func DeepCopy_v1beta1_ScaleStatus(in interface{}, out interface{}, c *conversion } } -func DeepCopy_v1beta1_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StorageClass) - out := out.(*StorageClass) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - out.Provisioner = in.Provisioner - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } else { - out.Parameters = nil - } - return nil - } -} - -func DeepCopy_v1beta1_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StorageClassList) - out := out.(*StorageClassList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StorageClass, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil - } -} - func DeepCopy_v1beta1_SubresourceReference(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*SubresourceReference) diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/zz_generated.deepcopy.go 
b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/zz_generated.deepcopy.go index 01a597c98d49..02ba202bb359 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/extensions/zz_generated.deepcopy.go @@ -86,8 +86,6 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error { conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_Scale, InType: reflect.TypeOf(&Scale{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ScaleSpec, InType: reflect.TypeOf(&ScaleSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ScaleStatus, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_SupplementalGroupsStrategyOptions, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResource, InType: reflect.TypeOf(&ThirdPartyResource{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_extensions_ThirdPartyResourceData, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, @@ -981,49 +979,6 @@ func DeepCopy_extensions_ScaleStatus(in interface{}, out interface{}, c *convers } } -func DeepCopy_extensions_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StorageClass) - out := out.(*StorageClass) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - out.Provisioner = in.Provisioner - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make(map[string]string) - for key, val := range *in { - (*out)[key] = val - } - } else { - out.Parameters = nil - } - return nil - } -} - -func DeepCopy_extensions_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*StorageClassList) - out := out.(*StorageClassList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StorageClass, len(*in)) - for i := range *in { - if err := DeepCopy_extensions_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil - } -} - func DeepCopy_extensions_SupplementalGroupsStrategyOptions(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*SupplementalGroupsStrategyOptions) diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/doc.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/doc.go similarity index 93% rename from vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/doc.go rename to vendor/k8s.io/client-go/1.4/pkg/apis/storage/doc.go index 7a45fb7bb8ca..a7eb30b643bd 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/doc.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/doc.go @@ -15,5 +15,5 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package,register - -package federation +// +groupName=storage.k8s.io +package storage diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/install/install.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/install/install.go similarity index 88% rename from vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/install/install.go rename to vendor/k8s.io/client-go/1.4/pkg/apis/storage/install/install.go index 9be31dbe53ba..a8d10ff001aa 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/install/install.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/install/install.go @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. package install import ( @@ -26,13 +28,13 @@ import ( "k8s.io/client-go/1.4/pkg/api/unversioned" "k8s.io/client-go/1.4/pkg/apimachinery" "k8s.io/client-go/1.4/pkg/apimachinery/registered" - "k8s.io/client-go/1.4/pkg/federation/apis/federation" - "k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1" + "k8s.io/client-go/1.4/pkg/apis/storage" + "k8s.io/client-go/1.4/pkg/apis/storage/v1beta1" "k8s.io/client-go/1.4/pkg/runtime" "k8s.io/client-go/1.4/pkg/util/sets" ) -const importPrefix = "k8s.io/client-go/1.4/pkg/federation/apis/federation" +const importPrefix = "k8s.io/client-go/1.4/pkg/apis/storage" var accessor = meta.NewAccessor() @@ -48,7 +50,7 @@ func init() { } } if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", federation.GroupName) + glog.V(4).Infof("No version is registered for group %v", storage.GroupName) return } @@ -89,7 +91,7 @@ func newRESTMapper(externalVersions []unversioned.GroupVersion) meta.RESTMapper // the list of kinds that are scoped at the root of the api hierarchy // if a kind is not enumerated here, it is assumed to have a namespace scope rootScoped := sets.NewString( - "Cluster", + "StorageClass", ) ignoredKinds := sets.NewString() @@ -107,14 +109,14 @@ func interfacesFor(version unversioned.GroupVersion) (*meta.VersionInterfaces, e MetadataAccessor: accessor, }, nil default: - g, _ := registered.Group(federation.GroupName) + g, _ := registered.Group(storage.GroupName) return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, g.GroupVersions) } } func addVersionsToScheme(externalVersions ...unversioned.GroupVersion) { // add the internal version to Scheme - if err := federation.AddToScheme(api.Scheme); err != nil { + if err := storage.AddToScheme(api.Scheme); err != nil { // Programmer error, detect immediately panic(err) } diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/register.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/register.go similarity index 74% rename from vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/register.go rename to vendor/k8s.io/client-go/1.4/pkg/apis/storage/register.go index c6f23e01165c..f2aabaf4efc4 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/register.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/register.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package federation +package storage import ( "k8s.io/client-go/1.4/pkg/api" @@ -23,17 +23,17 @@ import ( ) // GroupName is the group name use in this package -const GroupName = "federation" +const GroupName = "storage.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} -// Kind takes an unqualified kind and returns back a Group qualified GroupKind +// Kind takes an unqualified kind and returns a Group qualified GroupKind func Kind(kind string) unversioned.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } -// Resource takes an unqualified resource and returns back a Group qualified GroupResource +// Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) unversioned.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } @@ -45,13 +45,12 @@ var ( func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &Cluster{}, - &ClusterList{}, &api.ListOptions{}, &api.DeleteOptions{}, + &api.ExportOptions{}, + + &StorageClass{}, + &StorageClassList{}, ) return nil } - -func (obj *Cluster) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ClusterList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/storage/types.generated.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/types.generated.go new file mode 100644 index 000000000000..569e68715048 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/types.generated.go @@ -0,0 +1,900 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package storage + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg2_api "k8s.io/client-go/1.4/pkg/api" + pkg1_unversioned "k8s.io/client-go/1.4/pkg/api/unversioned" + pkg3_types "k8s.io/client-go/1.4/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg2_api.ObjectMeta + var v1 pkg1_unversioned.TypeMeta + var v2 pkg3_types.UID + var v3 time.Time + _, _, _, _ = v0, v1, v2, v3 + } +} + +func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = len(x.Parameters) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := 
&x.ObjectMeta + yy10.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.ObjectMeta + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("provisioner")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Parameters == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncMapStringStringV(x.Parameters, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("parameters")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Parameters == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncMapStringStringV(x.Parameters, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageClass) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct19 := r.ContainerType() + if yyct19 == codecSelferValueTypeMap1234 { + yyl19 := r.ReadMapStart() + if yyl19 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl19, d) + } + } else if yyct19 == codecSelferValueTypeArray1234 { + yyl19 := r.ReadArrayStart() + if yyl19 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl19, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageClass) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys20Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys20Slc + var yyhl20 bool = l >= 0 + for yyj20 := 0; ; yyj20++ { + if yyhl20 { + if yyj20 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys20Slc = r.DecodeBytes(yys20Slc, true, true) + yys20 := string(yys20Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys20 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv23 := &x.ObjectMeta + yyv23.CodecDecodeSelf(d) + } + case "provisioner": + if 
r.TryDecodeAsNil() { + x.Provisioner = "" + } else { + x.Provisioner = string(r.DecodeString()) + } + case "parameters": + if r.TryDecodeAsNil() { + x.Parameters = nil + } else { + yyv25 := &x.Parameters + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + z.F.DecMapStringStringX(yyv25, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys20) + } // end switch yys20 + } // end for yyj20 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj27 int + var yyb27 bool + var yyhl27 bool = l >= 0 + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_api.ObjectMeta{} + } else { + yyv30 := &x.ObjectMeta + yyv30.CodecDecodeSelf(d) + } + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Provisioner = "" + } else { + x.Provisioner = string(r.DecodeString()) + } + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Parameters = nil + } else { + yyv32 := &x.Parameters + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + z.F.DecMapStringStringX(yyv32, false, d) + } + } + for { + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj27-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym34 := z.EncBinary() + _ = yym34 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep35 := !z.EncBinary() + yy2arr35 := z.EncBasicHandle().StructToArray + var yyq35 [4]bool + _, _, _ = yysep35, yyq35, yy2arr35 + const yyr35 bool = false + yyq35[0] = x.Kind != "" + yyq35[1] = x.APIVersion != "" + yyq35[2] = true + var yynn35 int + if yyr35 || yy2arr35 { + r.EncodeArrayStart(4) + } else { + yynn35 = 1 + for _, b := range yyq35 { + if b { + yynn35++ + } + } + r.EncodeMapStart(yynn35) + yynn35 = 0 + } + if yyr35 || yy2arr35 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq35[0] { + yym37 := z.EncBinary() + _ = yym37 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq35[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym38 := z.EncBinary() + _ = yym38 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr35 || yy2arr35 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq35[1] { + yym40 := z.EncBinary() + _ = yym40 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq35[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr35 || yy2arr35 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq35[2] { + yy43 := &x.ListMeta + yym44 := z.EncBinary() + _ = yym44 + if false { + } else if z.HasExtensions() && z.EncExt(yy43) { + } else { + z.EncFallback(yy43) + } + } else { + r.EncodeNil() + } + } else { + if yyq35[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy45 := &x.ListMeta + yym46 := z.EncBinary() + _ = yym46 + if false { + } else if z.HasExtensions() && z.EncExt(yy45) { + } else { + z.EncFallback(yy45) + } + } + } + if yyr35 || yy2arr35 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym48 := z.EncBinary() + _ = yym48 + if false { + } else { + h.encSliceStorageClass(([]StorageClass)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym49 := z.EncBinary() + _ = yym49 + if false { + } else { + h.encSliceStorageClass(([]StorageClass)(x.Items), e) + } + } + } + if yyr35 || yy2arr35 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageClassList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym50 := z.DecBinary() + _ = yym50 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct51 := r.ContainerType() + if yyct51 == codecSelferValueTypeMap1234 { + yyl51 := r.ReadMapStart() + if yyl51 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl51, d) + } + } else if yyct51 == codecSelferValueTypeArray1234 { + yyl51 := r.ReadArrayStart() + if yyl51 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl51, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageClassList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) 
{ + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys52Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys52Slc + var yyhl52 bool = l >= 0 + for yyj52 := 0; ; yyj52++ { + if yyhl52 { + if yyj52 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys52Slc = r.DecodeBytes(yys52Slc, true, true) + yys52 := string(yys52Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys52 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv55 := &x.ListMeta + yym56 := z.DecBinary() + _ = yym56 + if false { + } else if z.HasExtensions() && z.DecExt(yyv55) { + } else { + z.DecFallback(yyv55, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv57 := &x.Items + yym58 := z.DecBinary() + _ = yym58 + if false { + } else { + h.decSliceStorageClass((*[]StorageClass)(yyv57), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys52) + } // end switch yys52 + } // end for yyj52 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj59 int + var yyb59 bool + var yyhl59 bool = l >= 0 + yyj59++ + if yyhl59 { + yyb59 = yyj59 > l + } else { + yyb59 = r.CheckBreak() + } + if yyb59 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj59++ + if yyhl59 { + yyb59 = yyj59 > l + } else { + yyb59 = r.CheckBreak() + } + if yyb59 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + yyj59++ + if yyhl59 { + yyb59 = yyj59 > l + } else { + yyb59 = r.CheckBreak() + } + if yyb59 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv62 := &x.ListMeta + yym63 := z.DecBinary() + _ = yym63 + if false { + } else if z.HasExtensions() && z.DecExt(yyv62) { + } else { + z.DecFallback(yyv62, false) + } + } + yyj59++ + if yyhl59 { + yyb59 = yyj59 > l + } else { + yyb59 = r.CheckBreak() + } + if yyb59 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv64 := &x.Items + yym65 := z.DecBinary() + _ = yym65 + if false { + } else { + h.decSliceStorageClass((*[]StorageClass)(yyv64), d) + } + } + for { + yyj59++ + if yyhl59 { + yyb59 = yyj59 > l + } else { + yyb59 = r.CheckBreak() + } + if yyb59 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj59-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceStorageClass(v 
[]StorageClass, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv66 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy67 := &yyv66 + yy67.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv68 := *v + yyh68, yyl68 := z.DecSliceHelperStart() + var yyc68 bool + if yyl68 == 0 { + if yyv68 == nil { + yyv68 = []StorageClass{} + yyc68 = true + } else if len(yyv68) != 0 { + yyv68 = yyv68[:0] + yyc68 = true + } + } else if yyl68 > 0 { + var yyrr68, yyrl68 int + var yyrt68 bool + if yyl68 > cap(yyv68) { + + yyrg68 := len(yyv68) > 0 + yyv268 := yyv68 + yyrl68, yyrt68 = z.DecInferLen(yyl68, z.DecBasicHandle().MaxInitLen, 280) + if yyrt68 { + if yyrl68 <= cap(yyv68) { + yyv68 = yyv68[:yyrl68] + } else { + yyv68 = make([]StorageClass, yyrl68) + } + } else { + yyv68 = make([]StorageClass, yyrl68) + } + yyc68 = true + yyrr68 = len(yyv68) + if yyrg68 { + copy(yyv68, yyv268) + } + } else if yyl68 != len(yyv68) { + yyv68 = yyv68[:yyl68] + yyc68 = true + } + yyj68 := 0 + for ; yyj68 < yyrr68; yyj68++ { + yyh68.ElemContainerState(yyj68) + if r.TryDecodeAsNil() { + yyv68[yyj68] = StorageClass{} + } else { + yyv69 := &yyv68[yyj68] + yyv69.CodecDecodeSelf(d) + } + + } + if yyrt68 { + for ; yyj68 < yyl68; yyj68++ { + yyv68 = append(yyv68, StorageClass{}) + yyh68.ElemContainerState(yyj68) + if r.TryDecodeAsNil() { + yyv68[yyj68] = StorageClass{} + } else { + yyv70 := &yyv68[yyj68] + yyv70.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj68 := 0 + for ; !r.CheckBreak(); yyj68++ { + + if yyj68 >= len(yyv68) { + yyv68 = append(yyv68, StorageClass{}) // var yyz68 StorageClass + yyc68 = true + } + yyh68.ElemContainerState(yyj68) + if yyj68 < len(yyv68) { + if r.TryDecodeAsNil() { + yyv68[yyj68] = StorageClass{} + } else { + yyv71 := &yyv68[yyj68] + yyv71.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj68 < len(yyv68) { + yyv68 = yyv68[:yyj68] + yyc68 = true + } else if yyj68 == 0 && yyv68 == nil { + yyv68 = []StorageClass{} + yyc68 = true + } + } + yyh68.End() + if yyc68 { + *v = yyv68 + } +} diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/storage/types.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/types.go new file mode 100644 index 000000000000..6d60fef7ba70 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/types.go @@ -0,0 +1,60 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "k8s.io/client-go/1.4/pkg/api" + "k8s.io/client-go/1.4/pkg/api/unversioned" +) + +// +genclient=true +// +nonNamespaced=true + +// StorageClass describes a named "class" of storage offered in a cluster. 
+// Different classes might map to quality-of-service levels, or to backup policies, +// or to arbitrary policies determined by the cluster administrators. Kubernetes +// itself is unopinionated about what classes represent. This concept is sometimes +// called "profiles" in other storage systems. +// The name of a StorageClass object is significant, and is how users can request a particular class. +type StorageClass struct { + unversioned.TypeMeta `json:",inline"` + api.ObjectMeta `json:"metadata,omitempty"` + + // provisioner is the driver expected to handle this StorageClass. + // This is an optionally-prefixed name, like a label key. + // For example: "kubernetes.io/gce-pd" or "kubernetes.io/aws-ebs". + // This value may not be empty. + Provisioner string `json:"provisioner"` + + // parameters holds parameters for the provisioner. + // These values are opaque to the system and are passed directly + // to the provisioner. The only validation done on keys is that they are + // not empty. The maximum number of parameters is + // 512, with a cumulative max size of 256K + Parameters map[string]string `json:"parameters,omitempty"` +} + +// StorageClassList is a collection of storage classes. +type StorageClassList struct { + unversioned.TypeMeta `json:",inline"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + unversioned.ListMeta `json:"metadata,omitempty"` + + // Items is the list of StorageClasses + Items []StorageClass `json:"items"` +} diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/doc.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/doc.go similarity index 87% rename from vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/doc.go rename to vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/doc.go index 3a4f89c982eb..fd556e8b6fea 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/doc.go @@ -15,6 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=k8s.io/kubernetes/federation/apis/federation - +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/storage +// +groupName=storage.k8s.io package v1beta1 diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/generated.pb.go new file mode 100644 index 000000000000..9295106ad6a2 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/generated.pb.go @@ -0,0 +1,729 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/pkg/apis/storage/v1beta1/generated.proto + + It has these top-level messages: + StorageClass + StorageClassList +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func init() { + proto.RegisterType((*StorageClass)(nil), "k8s.io.client-go.1.4.pkg.apis.storage.v1beta1.StorageClass") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.client-go.1.4.pkg.apis.storage.v1beta1.StorageClassList") +} +func (m *StorageClass) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClass) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n1 + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.Provisioner))) + i += copy(data[i:], m.Provisioner) + if len(m.Parameters) > 0 { + for k := range m.Parameters { + data[i] = 0x1a + i++ + v := m.Parameters[k] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(data, i, uint64(mapSize)) + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(len(k))) + i += copy(data[i:], k) + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(len(v))) + i += copy(data[i:], v) + } + } + return i, nil +} + +func (m *StorageClassList) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *StorageClassList) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, uint64(msg.Size())) + n, err := msg.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Generated(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + 
data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *StorageClass) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *StorageClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StorageClass) String() string { + if this == nil { + return "nil" + } + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *StorageClassList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StorageClass) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, 
wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provisioner = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(data[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(data[iNdEx:postStringIndexmapvalue]) 
+ iNdEx = postStringIndexmapvalue + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorGenerated = []byte{ + // 455 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x92, 0x4d, 0x6b, 0x13, 0x41, + 0x18, 0xc7, 0xb3, 0x09, 0xc1, 0x76, 0xa2, 0x18, 0x46, 0x0f, 0x61, 0x0f, 0x69, 0xe9, 0xa9, 0x8a, + 0x9d, 0x21, 0x85, 0x42, 0x28, 0x78, 0x59, 0x11, 0x14, 0x14, 0xcb, 0x7a, 0x11, 0xa1, 0x87, 0xd9, + 0xe4, 0x71, 0x1d, 0x37, 0xbb, 0xb3, 0xcc, 0xcb, 0x42, 0xc1, 0x83, 0x1f, 0xc1, 0x8f, 0x95, 0x63, + 0x8e, 0x1e, 0xa4, 0x68, 0xfd, 0x22, 0xce, 0xbe, 0xd8, 0x5d, 0xb2, 0x59, 0x11, 0x0f, 0x0f, 0xcc, + 0xdb, 0xef, 0xff, 0xfc, 0x9f, 0x3f, 0x83, 0xce, 0xa3, 0xb9, 0x22, 0x5c, 0xd0, 0xc8, 0x04, 0x20, + 0x13, 0xd0, 0xa0, 0x68, 0x1a, 0x85, 0x94, 0xa5, 0x5c, 0x51, 0xa5, 0x85, 0x64, 0x21, 0xd0, 0x6c, + 0x16, 0x80, 0x66, 0x33, 0x1a, 0x42, 0x02, 0x92, 0x69, 0x58, 0x92, 0x54, 0x0a, 0x2d, 0xf0, 0xe3, + 0x92, 0x25, 0x35, 0x4b, 0x2c, 0x4b, 0x72, 0x96, 0x54, 0x2c, 0xa9, 0x58, 0xf7, 0x24, 0xe4, 0xfa, + 0xa3, 0x09, 0xc8, 0x42, 0xc4, 0x34, 0x14, 0xa1, 0xa0, 0x85, 0x44, 0x60, 0x3e, 0x14, 0xbb, 0x62, + 0x53, 0xac, 0x4a, 0x69, 0xf7, 0xb4, 0xd3, 0x16, 0x95, 0xa0, 0x84, 0x91, 0x0b, 0xd8, 0xb6, 0xe3, + 0x9e, 0x75, 0x33, 0x26, 0xc9, 0x40, 0x2a, 0x2e, 0x12, 0x58, 0xb6, 0xb0, 0x27, 0xdd, 0x58, 0xd6, + 0x9a, 0xd9, 0x3d, 0xd9, 0xfd, 0x5a, 0x9a, 0x44, 0xf3, 0xb8, 0xed, 0x69, 0xb6, 0xfb, 0xb9, 0xd1, + 0x7c, 0x45, 0x79, 0xa2, 0x95, 0x96, 0xdb, 0xc8, 0xd1, 0xf7, 0x3e, 0xba, 0xfb, 0xb6, 0x4c, 0xef, + 0xd9, 0x8a, 0x29, 0x85, 0xdf, 0xa1, 0xbd, 0xd8, 0x66, 0xb8, 0x64, 0x9a, 0x4d, 0x9c, 0x43, 0xe7, + 0x78, 0x74, 0x7a, 0x4c, 0x3a, 0x93, 0xb7, 0x81, 0x93, 0x37, 0xc1, 0x27, 0x58, 0xe8, 0xd7, 0x96, + 0xf1, 0xf0, 0xfa, 0xfa, 0xa0, 0x77, 0x73, 0x7d, 0x80, 0xea, 0x33, 0xff, 0x56, 0x0d, 0x9f, 0xa1, + 0x91, 0xed, 0x99, 0xf1, 0x22, 0x19, 0x39, 0xe9, 0x5b, 0xf1, 0x7d, 0xef, 0x41, 0x85, 0x8c, 0x2e, + 0xea, 0x2b, 0xbf, 0xf9, 0x0e, 0x7f, 0x46, 0x28, 0x65, 0x92, 0x59, 0x19, 0x1b, 0xea, 0x64, 0x70, + 0x38, 0xb0, 0x96, 0x5e, 0x90, 0x7f, 0xff, 0x0c, 0xa4, 0x39, 0x1e, 0xb9, 0xb8, 0x95, 0x7a, 0x9e, + 0x68, 0x79, 0x55, 0x5b, 0xae, 0x2f, 0xfc, 0x46, 0x3f, 0xf7, 0x29, 0xba, 0xbf, 0x85, 0xe0, 0x31, + 0x1a, 0x44, 0x70, 0x55, 0x84, 0xb3, 0xef, 0xe7, 0x4b, 0xfc, 0x10, 0x0d, 0x33, 0xb6, 0x32, 0x50, + 0xce, 0xe4, 0x97, 0x9b, 0xf3, 0xfe, 0xdc, 0x39, 0xda, 0x38, 0x68, 0xdc, 0xec, 0xff, 0x8a, 0x2b, + 0x8d, 
0x2f, 0x5b, 0x11, 0xd3, 0xbf, 0x44, 0xdc, 0xf8, 0x4d, 0x24, 0xc7, 0x8b, 0xa4, 0xc7, 0x95, + 0xed, 0xbd, 0x3f, 0x27, 0x8d, 0x9c, 0x2f, 0xd1, 0x90, 0x6b, 0x88, 0x95, 0x75, 0x93, 0x67, 0x35, + 0xff, 0xdf, 0xac, 0xbc, 0x7b, 0x55, 0x93, 0xe1, 0xcb, 0x5c, 0xce, 0x2f, 0x55, 0xbd, 0x47, 0xeb, + 0x9f, 0xd3, 0xde, 0xc6, 0xd6, 0x37, 0x5b, 0x5f, 0x6e, 0xa6, 0xce, 0xda, 0xd6, 0xc6, 0xd6, 0x0f, + 0x5b, 0x5f, 0x7f, 0x4d, 0x7b, 0xef, 0xef, 0x54, 0x6a, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x86, + 0x21, 0xa9, 0x43, 0xef, 0x03, 0x00, 0x00, +} diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/generated.proto new file mode 100644 index 000000000000..c99a403d9ed8 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/generated.proto @@ -0,0 +1,59 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.kubernetes.pkg.apis.storage.v1beta1; + +import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; +import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; +import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; +import "k8s.io/kubernetes/pkg/runtime/generated.proto"; +import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +message StorageClass { + // Standard object's metadata. + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; + + // Provisioner indicates the type of the provisioner. + optional string provisioner = 2; + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + map parameters = 3; +} + +// StorageClassList is a collection of storage classes. 
+message StorageClassList { + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; + + // Items is the list of StorageClasses + repeated StorageClass items = 2; +} diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/register.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/register.go similarity index 76% rename from vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/register.go rename to vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/register.go index d2dfaa663e2e..85ab1025bbf0 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/register.go +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,26 +24,27 @@ import ( ) // GroupName is the group name use in this package -const GroupName = "federation" +const GroupName = "storage.k8s.io" // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = unversioned.GroupVersion{Group: GroupName, Version: "v1beta1"} var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, addDefaultingFuncs, addConversionFuncs) + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) AddToScheme = SchemeBuilder.AddToScheme ) +// Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &Cluster{}, - &ClusterList{}, &v1.ListOptions{}, &v1.DeleteOptions{}, + &v1.ExportOptions{}, + + &StorageClass{}, + &StorageClassList{}, ) + versionedwatch.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } - -func (obj *Cluster) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } -func (obj *ClusterList) GetObjectKind() unversioned.ObjectKind { return &obj.TypeMeta } diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/types.generated.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/types.generated.go new file mode 100644 index 000000000000..78983ce7b04c --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/types.generated.go @@ -0,0 +1,900 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
+// ************************************************************ + +package v1beta1 + +import ( + "errors" + "fmt" + codec1978 "github.com/ugorji/go/codec" + pkg1_unversioned "k8s.io/client-go/1.4/pkg/api/unversioned" + pkg2_v1 "k8s.io/client-go/1.4/pkg/api/v1" + pkg3_types "k8s.io/client-go/1.4/pkg/types" + "reflect" + "runtime" + time "time" +) + +const ( + // ----- content types ---- + codecSelferC_UTF81234 = 1 + codecSelferC_RAW1234 = 0 + // ----- value types used ---- + codecSelferValueTypeArray1234 = 10 + codecSelferValueTypeMap1234 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey1234 = 2 + codecSelfer_containerMapValue1234 = 3 + codecSelfer_containerMapEnd1234 = 4 + codecSelfer_containerArrayElem1234 = 6 + codecSelfer_containerArrayEnd1234 = 7 +) + +var ( + codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer1234 struct{} + +func init() { + if codec1978.GenVersion != 5 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 5, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 pkg1_unversioned.TypeMeta + var v1 pkg2_v1.ObjectMeta + var v2 pkg3_types.UID + var v3 time.Time + _, _, _, _ = v0, v1, v2, v3 + } +} + +func (x *StorageClass) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [5]bool + _, _, _ = yysep2, yyq2, yy2arr2 + const yyr2 bool = false + yyq2[0] = x.Kind != "" + yyq2[1] = x.APIVersion != "" + yyq2[2] = true + yyq2[4] = len(x.Parameters) != 0 + var yynn2 int + if yyr2 || yy2arr2 { + r.EncodeArrayStart(5) + } else { + yynn2 = 1 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.EncodeMapStart(yynn2) + yynn2 = 0 + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[0] { + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq2[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[2] { + yy10 := 
&x.ObjectMeta + yy10.CodecEncodeSelf(e) + } else { + r.EncodeNil() + } + } else { + if yyq2[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy11 := &x.ObjectMeta + yy11.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("provisioner")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Provisioner)) + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq2[4] { + if x.Parameters == nil { + r.EncodeNil() + } else { + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + z.F.EncMapStringStringV(x.Parameters, false, e) + } + } + } else { + r.EncodeNil() + } + } else { + if yyq2[4] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("parameters")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Parameters == nil { + r.EncodeNil() + } else { + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + z.F.EncMapStringStringV(x.Parameters, false, e) + } + } + } + } + if yyr2 || yy2arr2 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageClass) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym18 := z.DecBinary() + _ = yym18 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct19 := r.ContainerType() + if yyct19 == codecSelferValueTypeMap1234 { + yyl19 := r.ReadMapStart() + if yyl19 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl19, d) + } + } else if yyct19 == codecSelferValueTypeArray1234 { + yyl19 := r.ReadArrayStart() + if yyl19 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl19, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageClass) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys20Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys20Slc + var yyhl20 bool = l >= 0 + for yyj20 := 0; ; yyj20++ { + if yyhl20 { + if yyj20 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys20Slc = r.DecodeBytes(yys20Slc, true, true) + yys20 := string(yys20Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys20 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + case "metadata": + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv23 := &x.ObjectMeta + yyv23.CodecDecodeSelf(d) + } + case "provisioner": + if 
r.TryDecodeAsNil() { + x.Provisioner = "" + } else { + x.Provisioner = string(r.DecodeString()) + } + case "parameters": + if r.TryDecodeAsNil() { + x.Parameters = nil + } else { + yyv25 := &x.Parameters + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + z.F.DecMapStringStringX(yyv25, false, d) + } + } + default: + z.DecStructFieldNotFound(-1, yys20) + } // end switch yys20 + } // end for yyj20 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageClass) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj27 int + var yyb27 bool + var yyhl27 bool = l >= 0 + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ObjectMeta = pkg2_v1.ObjectMeta{} + } else { + yyv30 := &x.ObjectMeta + yyv30.CodecDecodeSelf(d) + } + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Provisioner = "" + } else { + x.Provisioner = string(r.DecodeString()) + } + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Parameters = nil + } else { + yyv32 := &x.Parameters + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + z.F.DecMapStringStringX(yyv32, false, d) + } + } + for { + yyj27++ + if yyhl27 { + yyb27 = yyj27 > l + } else { + yyb27 = r.CheckBreak() + } + if yyb27 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj27-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x *StorageClassList) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym34 := z.EncBinary() + _ = yym34 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep35 := !z.EncBinary() + yy2arr35 := z.EncBasicHandle().StructToArray + var yyq35 [4]bool + _, _, _ = yysep35, yyq35, yy2arr35 + const yyr35 bool = false + yyq35[0] = x.Kind != "" + yyq35[1] = x.APIVersion != "" + yyq35[2] = true + var yynn35 int + if yyr35 || yy2arr35 { + r.EncodeArrayStart(4) + } else { + yynn35 = 1 + for _, b := range yyq35 { + if b { + yynn35++ + } + } + r.EncodeMapStart(yynn35) + yynn35 = 0 + } + if yyr35 || yy2arr35 { + 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq35[0] { + yym37 := z.EncBinary() + _ = yym37 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq35[0] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("kind")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym38 := z.EncBinary() + _ = yym38 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) + } + } + } + if yyr35 || yy2arr35 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq35[1] { + yym40 := z.EncBinary() + _ = yym40 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } else { + r.EncodeString(codecSelferC_UTF81234, "") + } + } else { + if yyq35[1] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yym41 := z.EncBinary() + _ = yym41 + if false { + } else { + r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) + } + } + } + if yyr35 || yy2arr35 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if yyq35[2] { + yy43 := &x.ListMeta + yym44 := z.EncBinary() + _ = yym44 + if false { + } else if z.HasExtensions() && z.EncExt(yy43) { + } else { + z.EncFallback(yy43) + } + } else { + r.EncodeNil() + } + } else { + if yyq35[2] { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("metadata")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + yy45 := &x.ListMeta + yym46 := z.EncBinary() + _ = yym46 + if false { + } else if z.HasExtensions() && z.EncExt(yy45) { + } else { + z.EncFallback(yy45) + } + } + } + if yyr35 || yy2arr35 { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym48 := z.EncBinary() + _ = yym48 + if false { + } else { + h.encSliceStorageClass(([]StorageClass)(x.Items), e) + } + } + } else { + z.EncSendContainerState(codecSelfer_containerMapKey1234) + r.EncodeString(codecSelferC_UTF81234, string("items")) + z.EncSendContainerState(codecSelfer_containerMapValue1234) + if x.Items == nil { + r.EncodeNil() + } else { + yym49 := z.EncBinary() + _ = yym49 + if false { + } else { + h.encSliceStorageClass(([]StorageClass)(x.Items), e) + } + } + } + if yyr35 || yy2arr35 { + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + z.EncSendContainerState(codecSelfer_containerMapEnd1234) + } + } + } +} + +func (x *StorageClassList) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym50 := z.DecBinary() + _ = yym50 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct51 := r.ContainerType() + if yyct51 == codecSelferValueTypeMap1234 { + yyl51 := r.ReadMapStart() + if yyl51 == 0 { + z.DecSendContainerState(codecSelfer_containerMapEnd1234) + } else { + x.codecDecodeSelfFromMap(yyl51, d) + } + } else if yyct51 == codecSelferValueTypeArray1234 { + yyl51 := r.ReadArrayStart() + if yyl51 == 0 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + } else { + x.codecDecodeSelfFromArray(yyl51, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) + } + } +} + +func (x *StorageClassList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) 
{ + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys52Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys52Slc + var yyhl52 bool = l >= 0 + for yyj52 := 0; ; yyj52++ { + if yyhl52 { + if yyj52 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + z.DecSendContainerState(codecSelfer_containerMapKey1234) + yys52Slc = r.DecodeBytes(yys52Slc, true, true) + yys52 := string(yys52Slc) + z.DecSendContainerState(codecSelfer_containerMapValue1234) + switch yys52 { + case "kind": + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + case "apiVersion": + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + case "metadata": + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv55 := &x.ListMeta + yym56 := z.DecBinary() + _ = yym56 + if false { + } else if z.HasExtensions() && z.DecExt(yyv55) { + } else { + z.DecFallback(yyv55, false) + } + } + case "items": + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv57 := &x.Items + yym58 := z.DecBinary() + _ = yym58 + if false { + } else { + h.decSliceStorageClass((*[]StorageClass)(yyv57), d) + } + } + default: + z.DecStructFieldNotFound(-1, yys52) + } // end switch yys52 + } // end for yyj52 + z.DecSendContainerState(codecSelfer_containerMapEnd1234) +} + +func (x *StorageClassList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj59 int + var yyb59 bool + var yyhl59 bool = l >= 0 + yyj59++ + if yyhl59 { + yyb59 = yyj59 > l + } else { + yyb59 = r.CheckBreak() + } + if yyb59 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Kind = "" + } else { + x.Kind = string(r.DecodeString()) + } + yyj59++ + if yyhl59 { + yyb59 = yyj59 > l + } else { + yyb59 = r.CheckBreak() + } + if yyb59 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.APIVersion = "" + } else { + x.APIVersion = string(r.DecodeString()) + } + yyj59++ + if yyhl59 { + yyb59 = yyj59 > l + } else { + yyb59 = r.CheckBreak() + } + if yyb59 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.ListMeta = pkg1_unversioned.ListMeta{} + } else { + yyv62 := &x.ListMeta + yym63 := z.DecBinary() + _ = yym63 + if false { + } else if z.HasExtensions() && z.DecExt(yyv62) { + } else { + z.DecFallback(yyv62, false) + } + } + yyj59++ + if yyhl59 { + yyb59 = yyj59 > l + } else { + yyb59 = r.CheckBreak() + } + if yyb59 { + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) + return + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + if r.TryDecodeAsNil() { + x.Items = nil + } else { + yyv64 := &x.Items + yym65 := z.DecBinary() + _ = yym65 + if false { + } else { + h.decSliceStorageClass((*[]StorageClass)(yyv64), d) + } + } + for { + yyj59++ + if yyhl59 { + yyb59 = yyj59 > l + } else { + yyb59 = r.CheckBreak() + } + if yyb59 { + break + } + z.DecSendContainerState(codecSelfer_containerArrayElem1234) + z.DecStructFieldNotFound(yyj59-1, "") + } + z.DecSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) encSliceStorageClass(v 
[]StorageClass, e *codec1978.Encoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.EncodeArrayStart(len(v)) + for _, yyv66 := range v { + z.EncSendContainerState(codecSelfer_containerArrayElem1234) + yy67 := &yyv66 + yy67.CodecEncodeSelf(e) + } + z.EncSendContainerState(codecSelfer_containerArrayEnd1234) +} + +func (x codecSelfer1234) decSliceStorageClass(v *[]StorageClass, d *codec1978.Decoder) { + var h codecSelfer1234 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv68 := *v + yyh68, yyl68 := z.DecSliceHelperStart() + var yyc68 bool + if yyl68 == 0 { + if yyv68 == nil { + yyv68 = []StorageClass{} + yyc68 = true + } else if len(yyv68) != 0 { + yyv68 = yyv68[:0] + yyc68 = true + } + } else if yyl68 > 0 { + var yyrr68, yyrl68 int + var yyrt68 bool + if yyl68 > cap(yyv68) { + + yyrg68 := len(yyv68) > 0 + yyv268 := yyv68 + yyrl68, yyrt68 = z.DecInferLen(yyl68, z.DecBasicHandle().MaxInitLen, 280) + if yyrt68 { + if yyrl68 <= cap(yyv68) { + yyv68 = yyv68[:yyrl68] + } else { + yyv68 = make([]StorageClass, yyrl68) + } + } else { + yyv68 = make([]StorageClass, yyrl68) + } + yyc68 = true + yyrr68 = len(yyv68) + if yyrg68 { + copy(yyv68, yyv268) + } + } else if yyl68 != len(yyv68) { + yyv68 = yyv68[:yyl68] + yyc68 = true + } + yyj68 := 0 + for ; yyj68 < yyrr68; yyj68++ { + yyh68.ElemContainerState(yyj68) + if r.TryDecodeAsNil() { + yyv68[yyj68] = StorageClass{} + } else { + yyv69 := &yyv68[yyj68] + yyv69.CodecDecodeSelf(d) + } + + } + if yyrt68 { + for ; yyj68 < yyl68; yyj68++ { + yyv68 = append(yyv68, StorageClass{}) + yyh68.ElemContainerState(yyj68) + if r.TryDecodeAsNil() { + yyv68[yyj68] = StorageClass{} + } else { + yyv70 := &yyv68[yyj68] + yyv70.CodecDecodeSelf(d) + } + + } + } + + } else { + yyj68 := 0 + for ; !r.CheckBreak(); yyj68++ { + + if yyj68 >= len(yyv68) { + yyv68 = append(yyv68, StorageClass{}) // var yyz68 StorageClass + yyc68 = true + } + yyh68.ElemContainerState(yyj68) + if yyj68 < len(yyv68) { + if r.TryDecodeAsNil() { + yyv68[yyj68] = StorageClass{} + } else { + yyv71 := &yyv68[yyj68] + yyv71.CodecDecodeSelf(d) + } + + } else { + z.DecSwallow() + } + + } + if yyj68 < len(yyv68) { + yyv68 = yyv68[:yyj68] + yyc68 = true + } else if yyj68 == 0 && yyv68 == nil { + yyv68 = []StorageClass{} + yyc68 = true + } + } + yyh68.End() + if yyc68 { + *v = yyv68 + } +} diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/types.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/types.go new file mode 100644 index 000000000000..6d813c1bb652 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/types.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/client-go/1.4/pkg/api/unversioned" + "k8s.io/client-go/1.4/pkg/api/v1" +) + +// +genclient=true +// +nonNamespaced=true + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. 
+//
+// StorageClasses are non-namespaced; the name of the storage class
+// according to etcd is in ObjectMeta.Name.
+type StorageClass struct {
+	unversioned.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+	v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Provisioner indicates the type of the provisioner.
+	Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"`
+
+	// Parameters holds the parameters for the provisioner that should
+	// create volumes of this storage class.
+	Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"`
+}
+
+// StorageClassList is a collection of storage classes.
+type StorageClassList struct {
+	unversioned.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
+	unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of StorageClasses
+	Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 000000000000..e8362e381ff8
--- /dev/null
+++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_StorageClass = map[string]string{
+	"": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.",
+	"metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+	"provisioner": "Provisioner indicates the type of the provisioner.",
+	"parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.",
+}
+
+func (StorageClass) SwaggerDoc() map[string]string {
+	return map_StorageClass
+}
+
+var map_StorageClassList = map[string]string{
+	"": "StorageClassList is a collection of storage classes.",
+	"metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata",
+	"items": "Items is the list of StorageClasses",
+}
+
+func (StorageClassList) SwaggerDoc() map[string]string {
+	return map_StorageClassList
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/zz_generated.conversion.go
new file mode 100644
index 000000000000..741b367d242e
--- /dev/null
+++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/zz_generated.conversion.go
@@ -0,0 +1,127 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+
+package v1beta1
+
+import (
+	api "k8s.io/client-go/1.4/pkg/api"
+	storage "k8s.io/client-go/1.4/pkg/apis/storage"
+	conversion "k8s.io/client-go/1.4/pkg/conversion"
+	runtime "k8s.io/client-go/1.4/pkg/runtime"
+)
+
+func init() {
+	SchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(scheme *runtime.Scheme) error {
+	return scheme.AddGeneratedConversionFuncs(
+		Convert_v1beta1_StorageClass_To_storage_StorageClass,
+		Convert_storage_StorageClass_To_v1beta1_StorageClass,
+		Convert_v1beta1_StorageClassList_To_storage_StorageClassList,
+		Convert_storage_StorageClassList_To_v1beta1_StorageClassList,
+	)
+}
+
+func autoConvert_v1beta1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error {
+	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+		return err
+	}
+	// TODO: Inefficient conversion - can we improve it?
+	if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+		return err
+	}
+	out.Provisioner = in.Provisioner
+	out.Parameters = in.Parameters
+	return nil
+}
+
+func Convert_v1beta1_StorageClass_To_storage_StorageClass(in *StorageClass, out *storage.StorageClass, s conversion.Scope) error {
+	return autoConvert_v1beta1_StorageClass_To_storage_StorageClass(in, out, s)
+}
+
+func autoConvert_storage_StorageClass_To_v1beta1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error {
+	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+		return err
+	}
+	// TODO: Inefficient conversion - can we improve it?
+	if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {
+		return err
+	}
+	out.Provisioner = in.Provisioner
+	out.Parameters = in.Parameters
+	return nil
+}
+
+func Convert_storage_StorageClass_To_v1beta1_StorageClass(in *storage.StorageClass, out *StorageClass, s conversion.Scope) error {
+	return autoConvert_storage_StorageClass_To_v1beta1_StorageClass(in, out, s)
+}
+
+func autoConvert_v1beta1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error {
+	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+		return err
+	}
+	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+		return err
+	}
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]storage.StorageClass, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_StorageClass_To_storage_StorageClass(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Items = nil
+	}
+	return nil
+}
+
+func Convert_v1beta1_StorageClassList_To_storage_StorageClassList(in *StorageClassList, out *storage.StorageClassList, s conversion.Scope) error {
+	return autoConvert_v1beta1_StorageClassList_To_storage_StorageClassList(in, out, s)
+}
+
+func autoConvert_storage_StorageClassList_To_v1beta1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error {
+	if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
+		return err
+	}
+	if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {
+		return err
+	}
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]StorageClass, len(*in))
+		for i := range *in {
+			if err := Convert_storage_StorageClass_To_v1beta1_StorageClass(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Items = nil
+	}
+	return nil
+}
+
+func Convert_storage_StorageClassList_To_v1beta1_StorageClassList(in *storage.StorageClassList, out *StorageClassList, s conversion.Scope) error {
+	return autoConvert_storage_StorageClassList_To_v1beta1_StorageClassList(in, out, s)
+}
diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 000000000000..dc1f2958e5ca
--- /dev/null
+++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,84 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2016 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/client-go/1.4/pkg/api/v1" + conversion "k8s.io/client-go/1.4/pkg/conversion" + runtime "k8s.io/client-go/1.4/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, + ) +} + +func DeepCopy_v1beta1_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClass) + out := out.(*StorageClass) + out.TypeMeta = in.TypeMeta + if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + out.Provisioner = in.Provisioner + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } else { + out.Parameters = nil + } + return nil + } +} + +func DeepCopy_v1beta1_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClassList) + out := out.(*StorageClassList) + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + if err := DeepCopy_v1beta1_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/1.4/pkg/apis/storage/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/zz_generated.deepcopy.go new file mode 100644 index 000000000000..acf8aeadca88 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/pkg/apis/storage/zz_generated.deepcopy.go @@ -0,0 +1,84 @@ +// +build !ignore_autogenerated + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! 
+ +package storage + +import ( + api "k8s.io/client-go/1.4/pkg/api" + conversion "k8s.io/client-go/1.4/pkg/conversion" + runtime "k8s.io/client-go/1.4/pkg/runtime" + reflect "reflect" +) + +func init() { + SchemeBuilder.Register(RegisterDeepCopies) +} + +// RegisterDeepCopies adds deep-copy functions to the given scheme. Public +// to allow building arbitrary schemes. +func RegisterDeepCopies(scheme *runtime.Scheme) error { + return scheme.AddGeneratedDeepCopyFuncs( + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_storage_StorageClass, InType: reflect.TypeOf(&StorageClass{})}, + conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_storage_StorageClassList, InType: reflect.TypeOf(&StorageClassList{})}, + ) +} + +func DeepCopy_storage_StorageClass(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClass) + out := out.(*StorageClass) + out.TypeMeta = in.TypeMeta + if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { + return err + } + out.Provisioner = in.Provisioner + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string) + for key, val := range *in { + (*out)[key] = val + } + } else { + out.Parameters = nil + } + return nil + } +} + +func DeepCopy_storage_StorageClassList(in interface{}, out interface{}, c *conversion.Cloner) error { + { + in := in.(*StorageClassList) + out := out.(*StorageClassList) + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + if err := DeepCopy_storage_StorageClass(&(*in)[i], &(*out)[i], c); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil + } +} diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/types.generated.go b/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/types.generated.go deleted file mode 100644 index 01d61869fe50..000000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/types.generated.go +++ /dev/null @@ -1,2954 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. 
-// ************************************************************ - -package federation - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg1_api "k8s.io/client-go/1.4/pkg/api" - pkg2_unversioned "k8s.io/client-go/1.4/pkg/api/unversioned" - pkg3_types "k8s.io/client-go/1.4/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg1_api.LocalObjectReference - var v1 pkg2_unversioned.Time - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *ServerAddressByClientCIDR) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clientCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serverAddress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServerAddressByClientCIDR) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym9 := z.DecBinary() - _ = 
yym9 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct10 := r.ContainerType() - if yyct10 == codecSelferValueTypeMap1234 { - yyl10 := r.ReadMapStart() - if yyl10 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl10, d) - } - } else if yyct10 == codecSelferValueTypeArray1234 { - yyl10 := r.ReadArrayStart() - if yyl10 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl10, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServerAddressByClientCIDR) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys11Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys11Slc - var yyhl11 bool = l >= 0 - for yyj11 := 0; ; yyj11++ { - if yyhl11 { - if yyj11 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys11Slc = r.DecodeBytes(yys11Slc, true, true) - yys11 := string(yys11Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys11 { - case "clientCIDR": - if r.TryDecodeAsNil() { - x.ClientCIDR = "" - } else { - x.ClientCIDR = string(r.DecodeString()) - } - case "serverAddress": - if r.TryDecodeAsNil() { - x.ServerAddress = "" - } else { - x.ServerAddress = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys11) - } // end switch yys11 - } // end for yyj11 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServerAddressByClientCIDR) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClientCIDR = "" - } else { - x.ClientCIDR = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServerAddress = "" - } else { - x.ServerAddress = string(r.DecodeString()) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep18 := !z.EncBinary() - yy2arr18 := z.EncBasicHandle().StructToArray - var yyq18 [2]bool - _, _, _ = yysep18, yyq18, yy2arr18 - const yyr18 bool = false - yyq18[1] = x.SecretRef != nil - var yynn18 int - if yyr18 || yy2arr18 { - r.EncodeArrayStart(2) - } else { - yynn18 = 1 - for _, b := range yyq18 { - if b { - yynn18++ - } - } - r.EncodeMapStart(yynn18) - yynn18 = 0 - } - if yyr18 || yy2arr18 { - 
z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.ServerAddressByClientCIDRs == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serverAddressByClientCIDRs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ServerAddressByClientCIDRs == nil { - r.EncodeNil() - } else { - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) - } - } - } - if yyr18 || yy2arr18 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq18[1] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq18[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr18 || yy2arr18 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym23 := z.DecBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct24 := r.ContainerType() - if yyct24 == codecSelferValueTypeMap1234 { - yyl24 := r.ReadMapStart() - if yyl24 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl24, d) - } - } else if yyct24 == codecSelferValueTypeArray1234 { - yyl24 := r.ReadArrayStart() - if yyl24 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl24, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys25Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys25Slc - var yyhl25 bool = l >= 0 - for yyj25 := 0; ; yyj25++ { - if yyhl25 { - if yyj25 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys25Slc = r.DecodeBytes(yys25Slc, true, true) - yys25 := string(yys25Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys25 { - case "serverAddressByClientCIDRs": - if r.TryDecodeAsNil() { - x.ServerAddressByClientCIDRs = nil - } else { - yyv26 := &x.ServerAddressByClientCIDRs - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv26), d) - } - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(pkg1_api.LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys25) - } // end switch yys25 - } // end for yyj25 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x 
*ClusterSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj29 int - var yyb29 bool - var yyhl29 bool = l >= 0 - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServerAddressByClientCIDRs = nil - } else { - yyv30 := &x.ServerAddressByClientCIDRs - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv30), d) - } - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(pkg1_api.LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - for { - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj29-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ClusterConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym33 := z.EncBinary() - _ = yym33 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ClusterConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym34 := z.DecBinary() - _ = yym34 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ClusterCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep36 := !z.EncBinary() - yy2arr36 := z.EncBasicHandle().StructToArray - var yyq36 [6]bool - _, _, _ = yysep36, yyq36, yy2arr36 - const yyr36 bool = false - yyq36[2] = true - yyq36[3] = true - yyq36[4] = x.Reason != "" - yyq36[5] = x.Message != "" - var yynn36 int - if yyr36 || yy2arr36 { - r.EncodeArrayStart(6) - } else { - yynn36 = 2 - for _, b := range yyq36 { - if b { - yynn36++ - } - } - r.EncodeMapStart(yynn36) - yynn36 = 0 - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym39 := z.EncBinary() - _ = yym39 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym40 := z.EncBinary() - _ = yym40 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[2] { - yy42 := &x.LastProbeTime - yym43 := z.EncBinary() - _ = yym43 - if false { - } else if z.HasExtensions() && z.EncExt(yy42) { - } else if yym43 { - z.EncBinaryMarshal(yy42) - } else if !yym43 && z.IsJSONHandle() { - z.EncJSONMarshal(yy42) - } else { - z.EncFallback(yy42) - } - } else { - r.EncodeNil() - } - } else { - if yyq36[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy44 := &x.LastProbeTime - yym45 := z.EncBinary() - _ = yym45 - if false { - } else if z.HasExtensions() && z.EncExt(yy44) { - } else if yym45 { - z.EncBinaryMarshal(yy44) - } else if !yym45 && z.IsJSONHandle() { - z.EncJSONMarshal(yy44) - } else { - z.EncFallback(yy44) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[3] { - yy47 := &x.LastTransitionTime - yym48 := z.EncBinary() - _ = yym48 - if false { - } else if z.HasExtensions() && z.EncExt(yy47) { - } else if yym48 { - z.EncBinaryMarshal(yy47) - } else if !yym48 && z.IsJSONHandle() { - z.EncJSONMarshal(yy47) - } else { - z.EncFallback(yy47) - } - } else { - r.EncodeNil() - } - } else { - if yyq36[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy49 := &x.LastTransitionTime - yym50 := z.EncBinary() - _ = yym50 - if false { - } else if z.HasExtensions() && z.EncExt(yy49) { - } else if yym50 { - z.EncBinaryMarshal(yy49) - } else if !yym50 && z.IsJSONHandle() { - z.EncJSONMarshal(yy49) - } else { - z.EncFallback(yy49) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[4] { - yym52 := z.EncBinary() - _ = yym52 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq36[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym53 := z.EncBinary() - _ = yym53 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[5] { - yym55 := z.EncBinary() - _ = yym55 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq36[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym56 := z.EncBinary() - _ = yym56 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterCondition) CodecDecodeSelf(d *codec1978.Decoder) 
{ - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym57 := z.DecBinary() - _ = yym57 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct58 := r.ContainerType() - if yyct58 == codecSelferValueTypeMap1234 { - yyl58 := r.ReadMapStart() - if yyl58 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl58, d) - } - } else if yyct58 == codecSelferValueTypeArray1234 { - yyl58 := r.ReadArrayStart() - if yyl58 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl58, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys59Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys59Slc - var yyhl59 bool = l >= 0 - for yyj59 := 0; ; yyj59++ { - if yyhl59 { - if yyj59 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys59Slc = r.DecodeBytes(yys59Slc, true, true) - yys59 := string(yys59Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys59 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ClusterConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg1_api.ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv62 := &x.LastProbeTime - yym63 := z.DecBinary() - _ = yym63 - if false { - } else if z.HasExtensions() && z.DecExt(yyv62) { - } else if yym63 { - z.DecBinaryUnmarshal(yyv62) - } else if !yym63 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv62) - } else { - z.DecFallback(yyv62, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv64 := &x.LastTransitionTime - yym65 := z.DecBinary() - _ = yym65 - if false { - } else if z.HasExtensions() && z.DecExt(yyv64) { - } else if yym65 { - z.DecBinaryUnmarshal(yyv64) - } else if !yym65 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv64) - } else { - z.DecFallback(yyv64, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys59) - } // end switch yys59 - } // end for yyj59 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj68 int - var yyb68 bool - var yyhl68 bool = l >= 0 - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ClusterConditionType(r.DecodeString()) - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg1_api.ConditionStatus(r.DecodeString()) - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv71 := &x.LastProbeTime - yym72 := z.DecBinary() - _ = yym72 - if false { - } else if z.HasExtensions() && z.DecExt(yyv71) { - } else if yym72 { - z.DecBinaryUnmarshal(yyv71) - } else if !yym72 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv71) - } else { - z.DecFallback(yyv71, false) - } - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv73 := &x.LastTransitionTime - yym74 := z.DecBinary() - _ = yym74 - if false { - } else if z.HasExtensions() && z.DecExt(yyv73) { - } else if yym74 { - z.DecBinaryUnmarshal(yyv73) - } else if !yym74 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv73) - } else { - z.DecFallback(yyv73, false) - } - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj68-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym77 := z.EncBinary() - _ = yym77 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep78 := !z.EncBinary() - yy2arr78 := z.EncBasicHandle().StructToArray - var yyq78 [3]bool - _, _, _ = yysep78, yyq78, yy2arr78 - const yyr78 bool = false - yyq78[0] = len(x.Conditions) != 0 - yyq78[1] = len(x.Zones) != 0 - yyq78[2] = x.Region != "" - var yynn78 int - if yyr78 || yy2arr78 { - r.EncodeArrayStart(3) - } else { - yynn78 = 0 - for _, b := range yyq78 { - if b { - yynn78++ - } - } - r.EncodeMapStart(yynn78) - yynn78 = 0 - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq78[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym80 := z.EncBinary() - _ = yym80 - if false { - } else { - h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq78[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, 
string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym81 := z.EncBinary() - _ = yym81 - if false { - } else { - h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) - } - } - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq78[1] { - if x.Zones == nil { - r.EncodeNil() - } else { - yym83 := z.EncBinary() - _ = yym83 - if false { - } else { - z.F.EncSliceStringV(x.Zones, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq78[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("zones")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Zones == nil { - r.EncodeNil() - } else { - yym84 := z.EncBinary() - _ = yym84 - if false { - } else { - z.F.EncSliceStringV(x.Zones, false, e) - } - } - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq78[2] { - yym86 := z.EncBinary() - _ = yym86 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Region)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq78[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("region")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym87 := z.EncBinary() - _ = yym87 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Region)) - } - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym88 := z.DecBinary() - _ = yym88 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct89 := r.ContainerType() - if yyct89 == codecSelferValueTypeMap1234 { - yyl89 := r.ReadMapStart() - if yyl89 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl89, d) - } - } else if yyct89 == codecSelferValueTypeArray1234 { - yyl89 := r.ReadArrayStart() - if yyl89 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl89, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys90Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys90Slc - var yyhl90 bool = l >= 0 - for yyj90 := 0; ; yyj90++ { - if yyhl90 { - if yyj90 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys90Slc = r.DecodeBytes(yys90Slc, true, true) - yys90 := string(yys90Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys90 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv91 := &x.Conditions - yym92 := z.DecBinary() - _ = yym92 - if false { - } else { - h.decSliceClusterCondition((*[]ClusterCondition)(yyv91), d) - } - } - case "zones": - if r.TryDecodeAsNil() { - x.Zones = nil - } else { - yyv93 := &x.Zones - yym94 := z.DecBinary() - _ = yym94 - if false { - } else { - z.F.DecSliceStringX(yyv93, 
false, d) - } - } - case "region": - if r.TryDecodeAsNil() { - x.Region = "" - } else { - x.Region = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys90) - } // end switch yys90 - } // end for yyj90 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj96 int - var yyb96 bool - var yyhl96 bool = l >= 0 - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv97 := &x.Conditions - yym98 := z.DecBinary() - _ = yym98 - if false { - } else { - h.decSliceClusterCondition((*[]ClusterCondition)(yyv97), d) - } - } - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Zones = nil - } else { - yyv99 := &x.Zones - yym100 := z.DecBinary() - _ = yym100 - if false { - } else { - z.F.DecSliceStringX(yyv99, false, d) - } - } - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Region = "" - } else { - x.Region = string(r.DecodeString()) - } - for { - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj96-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Cluster) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym102 := z.EncBinary() - _ = yym102 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep103 := !z.EncBinary() - yy2arr103 := z.EncBasicHandle().StructToArray - var yyq103 [5]bool - _, _, _ = yysep103, yyq103, yy2arr103 - const yyr103 bool = false - yyq103[0] = x.Kind != "" - yyq103[1] = x.APIVersion != "" - yyq103[2] = true - yyq103[3] = true - yyq103[4] = true - var yynn103 int - if yyr103 || yy2arr103 { - r.EncodeArrayStart(5) - } else { - yynn103 = 0 - for _, b := range yyq103 { - if b { - yynn103++ - } - } - r.EncodeMapStart(yynn103) - yynn103 = 0 - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[0] { - yym105 := z.EncBinary() - _ = yym105 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq103[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym106 := z.EncBinary() - _ = yym106 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[1] { - yym108 := z.EncBinary() - _ = yym108 - if false { - } 
else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq103[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym109 := z.EncBinary() - _ = yym109 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[2] { - yy111 := &x.ObjectMeta - yy111.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq103[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy112 := &x.ObjectMeta - yy112.CodecEncodeSelf(e) - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[3] { - yy114 := &x.Spec - yy114.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq103[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy115 := &x.Spec - yy115.CodecEncodeSelf(e) - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[4] { - yy117 := &x.Status - yy117.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq103[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy118 := &x.Status - yy118.CodecEncodeSelf(e) - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Cluster) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym119 := z.DecBinary() - _ = yym119 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct120 := r.ContainerType() - if yyct120 == codecSelferValueTypeMap1234 { - yyl120 := r.ReadMapStart() - if yyl120 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl120, d) - } - } else if yyct120 == codecSelferValueTypeArray1234 { - yyl120 := r.ReadArrayStart() - if yyl120 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl120, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Cluster) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys121Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys121Slc - var yyhl121 bool = l >= 0 - for yyj121 := 0; ; yyj121++ { - if yyhl121 { - if yyj121 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys121Slc = r.DecodeBytes(yys121Slc, true, true) - yys121 := string(yys121Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys121 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if 
r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_api.ObjectMeta{} - } else { - yyv124 := &x.ObjectMeta - yyv124.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ClusterSpec{} - } else { - yyv125 := &x.Spec - yyv125.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ClusterStatus{} - } else { - yyv126 := &x.Status - yyv126.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys121) - } // end switch yys121 - } // end for yyj121 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Cluster) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj127 int - var yyb127 bool - var yyhl127 bool = l >= 0 - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_api.ObjectMeta{} - } else { - yyv130 := &x.ObjectMeta - yyv130.CodecDecodeSelf(d) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ClusterSpec{} - } else { - yyv131 := &x.Spec - yyv131.CodecDecodeSelf(d) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ClusterStatus{} - } else { - yyv132 := &x.Status - yyv132.CodecDecodeSelf(d) - } - for { - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj127-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym133 := z.EncBinary() - _ = yym133 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep134 := !z.EncBinary() - yy2arr134 := z.EncBasicHandle().StructToArray - var yyq134 [4]bool - _, _, _ = yysep134, yyq134, yy2arr134 - const yyr134 bool = false - yyq134[0] = x.Kind != "" - yyq134[1] = x.APIVersion != "" - yyq134[2] = true - var yynn134 int - if yyr134 || yy2arr134 { - 
r.EncodeArrayStart(4) - } else { - yynn134 = 1 - for _, b := range yyq134 { - if b { - yynn134++ - } - } - r.EncodeMapStart(yynn134) - yynn134 = 0 - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq134[0] { - yym136 := z.EncBinary() - _ = yym136 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq134[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym137 := z.EncBinary() - _ = yym137 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq134[1] { - yym139 := z.EncBinary() - _ = yym139 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq134[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym140 := z.EncBinary() - _ = yym140 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq134[2] { - yy142 := &x.ListMeta - yym143 := z.EncBinary() - _ = yym143 - if false { - } else if z.HasExtensions() && z.EncExt(yy142) { - } else { - z.EncFallback(yy142) - } - } else { - r.EncodeNil() - } - } else { - if yyq134[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy144 := &x.ListMeta - yym145 := z.EncBinary() - _ = yym145 - if false { - } else if z.HasExtensions() && z.EncExt(yy144) { - } else { - z.EncFallback(yy144) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym147 := z.EncBinary() - _ = yym147 - if false { - } else { - h.encSliceCluster(([]Cluster)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym148 := z.EncBinary() - _ = yym148 - if false { - } else { - h.encSliceCluster(([]Cluster)(x.Items), e) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym149 := z.DecBinary() - _ = yym149 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct150 := r.ContainerType() - if yyct150 == codecSelferValueTypeMap1234 { - yyl150 := r.ReadMapStart() - if yyl150 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl150, d) - } - } else if yyct150 == codecSelferValueTypeArray1234 { - yyl150 := r.ReadArrayStart() - if yyl150 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - 
x.codecDecodeSelfFromArray(yyl150, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys151Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys151Slc - var yyhl151 bool = l >= 0 - for yyj151 := 0; ; yyj151++ { - if yyhl151 { - if yyj151 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys151Slc = r.DecodeBytes(yys151Slc, true, true) - yys151 := string(yys151Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys151 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv154 := &x.ListMeta - yym155 := z.DecBinary() - _ = yym155 - if false { - } else if z.HasExtensions() && z.DecExt(yyv154) { - } else { - z.DecFallback(yyv154, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv156 := &x.Items - yym157 := z.DecBinary() - _ = yym157 - if false { - } else { - h.decSliceCluster((*[]Cluster)(yyv156), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys151) - } // end switch yys151 - } // end for yyj151 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj158 int - var yyb158 bool - var yyhl158 bool = l >= 0 - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv161 := &x.ListMeta - yym162 := z.DecBinary() - _ = yym162 - if false { - } else if z.HasExtensions() && z.DecExt(yyv161) { - } else { - z.DecFallback(yyv161, false) - } - } - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv163 := &x.Items - yym164 := z.DecBinary() - _ = yym164 - if false { - } else { - h.decSliceCluster((*[]Cluster)(yyv163), d) - } - } - for { - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - 
break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj158-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *FederatedReplicaSetPreferences) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym165 := z.EncBinary() - _ = yym165 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep166 := !z.EncBinary() - yy2arr166 := z.EncBasicHandle().StructToArray - var yyq166 [2]bool - _, _, _ = yysep166, yyq166, yy2arr166 - const yyr166 bool = false - yyq166[0] = x.Rebalance != false - yyq166[1] = len(x.Clusters) != 0 - var yynn166 int - if yyr166 || yy2arr166 { - r.EncodeArrayStart(2) - } else { - yynn166 = 0 - for _, b := range yyq166 { - if b { - yynn166++ - } - } - r.EncodeMapStart(yynn166) - yynn166 = 0 - } - if yyr166 || yy2arr166 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq166[0] { - yym168 := z.EncBinary() - _ = yym168 - if false { - } else { - r.EncodeBool(bool(x.Rebalance)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq166[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("rebalance")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym169 := z.EncBinary() - _ = yym169 - if false { - } else { - r.EncodeBool(bool(x.Rebalance)) - } - } - } - if yyr166 || yy2arr166 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq166[1] { - if x.Clusters == nil { - r.EncodeNil() - } else { - yym171 := z.EncBinary() - _ = yym171 - if false { - } else { - h.encMapstringClusterReplicaSetPreferences((map[string]ClusterReplicaSetPreferences)(x.Clusters), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq166[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clusters")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Clusters == nil { - r.EncodeNil() - } else { - yym172 := z.EncBinary() - _ = yym172 - if false { - } else { - h.encMapstringClusterReplicaSetPreferences((map[string]ClusterReplicaSetPreferences)(x.Clusters), e) - } - } - } - } - if yyr166 || yy2arr166 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *FederatedReplicaSetPreferences) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym173 := z.DecBinary() - _ = yym173 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct174 := r.ContainerType() - if yyct174 == codecSelferValueTypeMap1234 { - yyl174 := r.ReadMapStart() - if yyl174 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl174, d) - } - } else if yyct174 == codecSelferValueTypeArray1234 { - yyl174 := r.ReadArrayStart() - if yyl174 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl174, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *FederatedReplicaSetPreferences) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys175Slc = z.DecScratchBuffer() // default slice to decode into - _ = 
yys175Slc - var yyhl175 bool = l >= 0 - for yyj175 := 0; ; yyj175++ { - if yyhl175 { - if yyj175 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys175Slc = r.DecodeBytes(yys175Slc, true, true) - yys175 := string(yys175Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys175 { - case "rebalance": - if r.TryDecodeAsNil() { - x.Rebalance = false - } else { - x.Rebalance = bool(r.DecodeBool()) - } - case "clusters": - if r.TryDecodeAsNil() { - x.Clusters = nil - } else { - yyv177 := &x.Clusters - yym178 := z.DecBinary() - _ = yym178 - if false { - } else { - h.decMapstringClusterReplicaSetPreferences((*map[string]ClusterReplicaSetPreferences)(yyv177), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys175) - } // end switch yys175 - } // end for yyj175 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *FederatedReplicaSetPreferences) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj179 int - var yyb179 bool - var yyhl179 bool = l >= 0 - yyj179++ - if yyhl179 { - yyb179 = yyj179 > l - } else { - yyb179 = r.CheckBreak() - } - if yyb179 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Rebalance = false - } else { - x.Rebalance = bool(r.DecodeBool()) - } - yyj179++ - if yyhl179 { - yyb179 = yyj179 > l - } else { - yyb179 = r.CheckBreak() - } - if yyb179 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Clusters = nil - } else { - yyv181 := &x.Clusters - yym182 := z.DecBinary() - _ = yym182 - if false { - } else { - h.decMapstringClusterReplicaSetPreferences((*map[string]ClusterReplicaSetPreferences)(yyv181), d) - } - } - for { - yyj179++ - if yyhl179 { - yyb179 = yyj179 > l - } else { - yyb179 = r.CheckBreak() - } - if yyb179 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj179-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterReplicaSetPreferences) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym183 := z.EncBinary() - _ = yym183 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep184 := !z.EncBinary() - yy2arr184 := z.EncBasicHandle().StructToArray - var yyq184 [3]bool - _, _, _ = yysep184, yyq184, yy2arr184 - const yyr184 bool = false - yyq184[0] = x.MinReplicas != 0 - yyq184[1] = x.MaxReplicas != nil - var yynn184 int - if yyr184 || yy2arr184 { - r.EncodeArrayStart(3) - } else { - yynn184 = 1 - for _, b := range yyq184 { - if b { - yynn184++ - } - } - r.EncodeMapStart(yynn184) - yynn184 = 0 - } - if yyr184 || yy2arr184 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq184[0] { - yym186 := z.EncBinary() - _ = yym186 - if false { - } else { - r.EncodeInt(int64(x.MinReplicas)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq184[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("minReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym187 := z.EncBinary() - _ = yym187 - if 
false { - } else { - r.EncodeInt(int64(x.MinReplicas)) - } - } - } - if yyr184 || yy2arr184 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq184[1] { - if x.MaxReplicas == nil { - r.EncodeNil() - } else { - yy189 := *x.MaxReplicas - yym190 := z.EncBinary() - _ = yym190 - if false { - } else { - r.EncodeInt(int64(yy189)) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq184[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("maxReplicas")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.MaxReplicas == nil { - r.EncodeNil() - } else { - yy191 := *x.MaxReplicas - yym192 := z.EncBinary() - _ = yym192 - if false { - } else { - r.EncodeInt(int64(yy191)) - } - } - } - } - if yyr184 || yy2arr184 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym194 := z.EncBinary() - _ = yym194 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("Weight")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym195 := z.EncBinary() - _ = yym195 - if false { - } else { - r.EncodeInt(int64(x.Weight)) - } - } - if yyr184 || yy2arr184 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterReplicaSetPreferences) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym196 := z.DecBinary() - _ = yym196 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct197 := r.ContainerType() - if yyct197 == codecSelferValueTypeMap1234 { - yyl197 := r.ReadMapStart() - if yyl197 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl197, d) - } - } else if yyct197 == codecSelferValueTypeArray1234 { - yyl197 := r.ReadArrayStart() - if yyl197 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl197, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterReplicaSetPreferences) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys198Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys198Slc - var yyhl198 bool = l >= 0 - for yyj198 := 0; ; yyj198++ { - if yyhl198 { - if yyj198 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys198Slc = r.DecodeBytes(yys198Slc, true, true) - yys198 := string(yys198Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys198 { - case "minReplicas": - if r.TryDecodeAsNil() { - x.MinReplicas = 0 - } else { - x.MinReplicas = int64(r.DecodeInt(64)) - } - case "maxReplicas": - if r.TryDecodeAsNil() { - if x.MaxReplicas != nil { - x.MaxReplicas = nil - } - } else { - if x.MaxReplicas == nil { - x.MaxReplicas = new(int64) - } - yym201 := z.DecBinary() - _ = yym201 - if false { - } else { - *((*int64)(x.MaxReplicas)) = int64(r.DecodeInt(64)) - } - } - case "Weight": - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys198) - } // end switch yys198 - } // end for yyj198 - 
z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterReplicaSetPreferences) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj203 int - var yyb203 bool - var yyhl203 bool = l >= 0 - yyj203++ - if yyhl203 { - yyb203 = yyj203 > l - } else { - yyb203 = r.CheckBreak() - } - if yyb203 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.MinReplicas = 0 - } else { - x.MinReplicas = int64(r.DecodeInt(64)) - } - yyj203++ - if yyhl203 { - yyb203 = yyj203 > l - } else { - yyb203 = r.CheckBreak() - } - if yyb203 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.MaxReplicas != nil { - x.MaxReplicas = nil - } - } else { - if x.MaxReplicas == nil { - x.MaxReplicas = new(int64) - } - yym206 := z.DecBinary() - _ = yym206 - if false { - } else { - *((*int64)(x.MaxReplicas)) = int64(r.DecodeInt(64)) - } - } - yyj203++ - if yyhl203 { - yyb203 = yyj203 > l - } else { - yyb203 = r.CheckBreak() - } - if yyb203 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Weight = 0 - } else { - x.Weight = int64(r.DecodeInt(64)) - } - for { - yyj203++ - if yyhl203 { - yyb203 = yyj203 > l - } else { - yyb203 = r.CheckBreak() - } - if yyb203 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj203-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceServerAddressByClientCIDR(v []ServerAddressByClientCIDR, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv208 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy209 := &yyv208 - yy209.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServerAddressByClientCIDR(v *[]ServerAddressByClientCIDR, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv210 := *v - yyh210, yyl210 := z.DecSliceHelperStart() - var yyc210 bool - if yyl210 == 0 { - if yyv210 == nil { - yyv210 = []ServerAddressByClientCIDR{} - yyc210 = true - } else if len(yyv210) != 0 { - yyv210 = yyv210[:0] - yyc210 = true - } - } else if yyl210 > 0 { - var yyrr210, yyrl210 int - var yyrt210 bool - if yyl210 > cap(yyv210) { - - yyrg210 := len(yyv210) > 0 - yyv2210 := yyv210 - yyrl210, yyrt210 = z.DecInferLen(yyl210, z.DecBasicHandle().MaxInitLen, 32) - if yyrt210 { - if yyrl210 <= cap(yyv210) { - yyv210 = yyv210[:yyrl210] - } else { - yyv210 = make([]ServerAddressByClientCIDR, yyrl210) - } - } else { - yyv210 = make([]ServerAddressByClientCIDR, yyrl210) - } - yyc210 = true - yyrr210 = len(yyv210) - if yyrg210 { - copy(yyv210, yyv2210) - } - } else if yyl210 != len(yyv210) { - yyv210 = yyv210[:yyl210] - yyc210 = true - } - yyj210 := 0 - for ; yyj210 < yyrr210; yyj210++ { - yyh210.ElemContainerState(yyj210) - if r.TryDecodeAsNil() { - yyv210[yyj210] = ServerAddressByClientCIDR{} - } else { - yyv211 := &yyv210[yyj210] - yyv211.CodecDecodeSelf(d) - } - - } - if yyrt210 { - for ; yyj210 < yyl210; 
yyj210++ { - yyv210 = append(yyv210, ServerAddressByClientCIDR{}) - yyh210.ElemContainerState(yyj210) - if r.TryDecodeAsNil() { - yyv210[yyj210] = ServerAddressByClientCIDR{} - } else { - yyv212 := &yyv210[yyj210] - yyv212.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj210 := 0 - for ; !r.CheckBreak(); yyj210++ { - - if yyj210 >= len(yyv210) { - yyv210 = append(yyv210, ServerAddressByClientCIDR{}) // var yyz210 ServerAddressByClientCIDR - yyc210 = true - } - yyh210.ElemContainerState(yyj210) - if yyj210 < len(yyv210) { - if r.TryDecodeAsNil() { - yyv210[yyj210] = ServerAddressByClientCIDR{} - } else { - yyv213 := &yyv210[yyj210] - yyv213.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj210 < len(yyv210) { - yyv210 = yyv210[:yyj210] - yyc210 = true - } else if yyj210 == 0 && yyv210 == nil { - yyv210 = []ServerAddressByClientCIDR{} - yyc210 = true - } - } - yyh210.End() - if yyc210 { - *v = yyv210 - } -} - -func (x codecSelfer1234) encSliceClusterCondition(v []ClusterCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv214 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy215 := &yyv214 - yy215.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceClusterCondition(v *[]ClusterCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv216 := *v - yyh216, yyl216 := z.DecSliceHelperStart() - var yyc216 bool - if yyl216 == 0 { - if yyv216 == nil { - yyv216 = []ClusterCondition{} - yyc216 = true - } else if len(yyv216) != 0 { - yyv216 = yyv216[:0] - yyc216 = true - } - } else if yyl216 > 0 { - var yyrr216, yyrl216 int - var yyrt216 bool - if yyl216 > cap(yyv216) { - - yyrg216 := len(yyv216) > 0 - yyv2216 := yyv216 - yyrl216, yyrt216 = z.DecInferLen(yyl216, z.DecBasicHandle().MaxInitLen, 112) - if yyrt216 { - if yyrl216 <= cap(yyv216) { - yyv216 = yyv216[:yyrl216] - } else { - yyv216 = make([]ClusterCondition, yyrl216) - } - } else { - yyv216 = make([]ClusterCondition, yyrl216) - } - yyc216 = true - yyrr216 = len(yyv216) - if yyrg216 { - copy(yyv216, yyv2216) - } - } else if yyl216 != len(yyv216) { - yyv216 = yyv216[:yyl216] - yyc216 = true - } - yyj216 := 0 - for ; yyj216 < yyrr216; yyj216++ { - yyh216.ElemContainerState(yyj216) - if r.TryDecodeAsNil() { - yyv216[yyj216] = ClusterCondition{} - } else { - yyv217 := &yyv216[yyj216] - yyv217.CodecDecodeSelf(d) - } - - } - if yyrt216 { - for ; yyj216 < yyl216; yyj216++ { - yyv216 = append(yyv216, ClusterCondition{}) - yyh216.ElemContainerState(yyj216) - if r.TryDecodeAsNil() { - yyv216[yyj216] = ClusterCondition{} - } else { - yyv218 := &yyv216[yyj216] - yyv218.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj216 := 0 - for ; !r.CheckBreak(); yyj216++ { - - if yyj216 >= len(yyv216) { - yyv216 = append(yyv216, ClusterCondition{}) // var yyz216 ClusterCondition - yyc216 = true - } - yyh216.ElemContainerState(yyj216) - if yyj216 < len(yyv216) { - if r.TryDecodeAsNil() { - yyv216[yyj216] = ClusterCondition{} - } else { - yyv219 := &yyv216[yyj216] - yyv219.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj216 < len(yyv216) { - yyv216 = yyv216[:yyj216] - yyc216 = true - } else if yyj216 == 0 && yyv216 == nil { - yyv216 = []ClusterCondition{} - yyc216 = true - } - } - yyh216.End() - if yyc216 { - *v = yyv216 - } -} - -func (x codecSelfer1234) 
encSliceCluster(v []Cluster, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv220 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy221 := &yyv220 - yy221.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCluster(v *[]Cluster, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv222 := *v - yyh222, yyl222 := z.DecSliceHelperStart() - var yyc222 bool - if yyl222 == 0 { - if yyv222 == nil { - yyv222 = []Cluster{} - yyc222 = true - } else if len(yyv222) != 0 { - yyv222 = yyv222[:0] - yyc222 = true - } - } else if yyl222 > 0 { - var yyrr222, yyrl222 int - var yyrt222 bool - if yyl222 > cap(yyv222) { - - yyrg222 := len(yyv222) > 0 - yyv2222 := yyv222 - yyrl222, yyrt222 = z.DecInferLen(yyl222, z.DecBasicHandle().MaxInitLen, 352) - if yyrt222 { - if yyrl222 <= cap(yyv222) { - yyv222 = yyv222[:yyrl222] - } else { - yyv222 = make([]Cluster, yyrl222) - } - } else { - yyv222 = make([]Cluster, yyrl222) - } - yyc222 = true - yyrr222 = len(yyv222) - if yyrg222 { - copy(yyv222, yyv2222) - } - } else if yyl222 != len(yyv222) { - yyv222 = yyv222[:yyl222] - yyc222 = true - } - yyj222 := 0 - for ; yyj222 < yyrr222; yyj222++ { - yyh222.ElemContainerState(yyj222) - if r.TryDecodeAsNil() { - yyv222[yyj222] = Cluster{} - } else { - yyv223 := &yyv222[yyj222] - yyv223.CodecDecodeSelf(d) - } - - } - if yyrt222 { - for ; yyj222 < yyl222; yyj222++ { - yyv222 = append(yyv222, Cluster{}) - yyh222.ElemContainerState(yyj222) - if r.TryDecodeAsNil() { - yyv222[yyj222] = Cluster{} - } else { - yyv224 := &yyv222[yyj222] - yyv224.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj222 := 0 - for ; !r.CheckBreak(); yyj222++ { - - if yyj222 >= len(yyv222) { - yyv222 = append(yyv222, Cluster{}) // var yyz222 Cluster - yyc222 = true - } - yyh222.ElemContainerState(yyj222) - if yyj222 < len(yyv222) { - if r.TryDecodeAsNil() { - yyv222[yyj222] = Cluster{} - } else { - yyv225 := &yyv222[yyj222] - yyv225.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj222 < len(yyv222) { - yyv222 = yyv222[:yyj222] - yyc222 = true - } else if yyj222 == 0 && yyv222 == nil { - yyv222 = []Cluster{} - yyc222 = true - } - } - yyh222.End() - if yyc222 { - *v = yyv222 - } -} - -func (x codecSelfer1234) encMapstringClusterReplicaSetPreferences(v map[string]ClusterReplicaSetPreferences, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeMapStart(len(v)) - for yyk226, yyv226 := range v { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - yym227 := z.EncBinary() - _ = yym227 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(yyk226)) - } - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy228 := &yyv226 - yy228.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x codecSelfer1234) decMapstringClusterReplicaSetPreferences(v *map[string]ClusterReplicaSetPreferences, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv229 := *v - yyl229 := r.ReadMapStart() - yybh229 := z.DecBasicHandle() - if yyv229 == nil { - yyrl229, _ := z.DecInferLen(yyl229, yybh229.MaxInitLen, 40) - yyv229 = make(map[string]ClusterReplicaSetPreferences, yyrl229) - *v = yyv229 - } - var yymk229 string 
- var yymv229 ClusterReplicaSetPreferences - var yymg229 bool - if yybh229.MapValueReset { - yymg229 = true - } - if yyl229 > 0 { - for yyj229 := 0; yyj229 < yyl229; yyj229++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk229 = "" - } else { - yymk229 = string(r.DecodeString()) - } - - if yymg229 { - yymv229 = yyv229[yymk229] - } else { - yymv229 = ClusterReplicaSetPreferences{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv229 = ClusterReplicaSetPreferences{} - } else { - yyv231 := &yymv229 - yyv231.CodecDecodeSelf(d) - } - - if yyv229 != nil { - yyv229[yymk229] = yymv229 - } - } - } else if yyl229 < 0 { - for yyj229 := 0; !r.CheckBreak(); yyj229++ { - z.DecSendContainerState(codecSelfer_containerMapKey1234) - if r.TryDecodeAsNil() { - yymk229 = "" - } else { - yymk229 = string(r.DecodeString()) - } - - if yymg229 { - yymv229 = yyv229[yymk229] - } else { - yymv229 = ClusterReplicaSetPreferences{} - } - z.DecSendContainerState(codecSelfer_containerMapValue1234) - if r.TryDecodeAsNil() { - yymv229 = ClusterReplicaSetPreferences{} - } else { - yyv233 := &yymv229 - yyv233.CodecDecodeSelf(d) - } - - if yyv229 != nil { - yyv229[yymk229] = yymv229 - } - } - } // else len==0: TODO: Should we clear map entries? - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/types.go b/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/types.go deleted file mode 100644 index b1bd520f71fc..000000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/types.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package federation - -import ( - "k8s.io/client-go/1.4/pkg/api" - "k8s.io/client-go/1.4/pkg/api/unversioned" -) - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -type ServerAddressByClientCIDR struct { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` -} - -// ClusterSpec describes the attributes of a kubernetes cluster. -type ClusterSpec struct { - // A map of client CIDR to server address. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. 
- ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" patchStrategy:"merge" patchMergeKey:"clientCIDR"` - // Name of the secret containing kubeconfig to access this cluster. - // The secret is read from the kubernetes cluster that is hosting federation control plane. - // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". - // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. - // This can be left empty if the cluster allows insecure access. - SecretRef *api.LocalObjectReference `json:"secretRef,omitempty"` -} - -type ClusterConditionType string - -// These are valid conditions of a cluster. -const ( - // ClusterReady means the cluster is ready to accept workloads. - ClusterReady ClusterConditionType = "Ready" - // ClusterOffline means the cluster is temporarily down or not reachable - ClusterOffline ClusterConditionType = "Offline" -) - -// ClusterCondition describes current state of a cluster. -type ClusterCondition struct { - // Type of cluster condition, Complete or Failed. - Type ClusterConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status api.ConditionStatus `json:"status"` - // Last time the condition was checked. - LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty"` - // Last time the condition transit from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty"` - // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty"` - // Human readable message indicating details about last transition. - Message string `json:"message,omitempty"` -} - -// ClusterStatus is information about the current status of a cluster updated by cluster controller peridocally. -type ClusterStatus struct { - // Conditions is an array of current cluster conditions. - Conditions []ClusterCondition `json:"conditions,omitempty"` - // Zones is the list of avaliability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. - // These will always be in the same region. - Zones []string `json:"zones,omitempty"` - // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. - Region string `json:"region,omitempty"` -} - -// +genclient=true -// +nonNamespaced=true - -// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. -type Cluster struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - api.ObjectMeta `json:"metadata,omitempty"` - - // Spec defines the behavior of the Cluster. - Spec ClusterSpec `json:"spec,omitempty"` - // Status describes the current status of a Cluster - Status ClusterStatus `json:"status,omitempty"` -} - -// A list of all the kubernetes clusters registered to the federation -type ClusterList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty"` - - // List of Cluster objects. 
- Items []Cluster `json:"items"` -} - -// Temporary/alpha stuctures to support custom replica assignments within FederatedReplicaSet. - -// A set of preferences that can be added to federated version of ReplicaSet as a json-serialized annotation. -// The preferences allow the user to express in which culsters he wants to put his replicas within the -// mentiond FederatedReplicaSet. -type FederatedReplicaSetPreferences struct { - // If set to true then already scheduled and running replicas may be moved to other clusters to - // in order to bring cluster replicasets towards a desired state. Otherwise, if set to false, - // up and running replicas will not be moved. - Rebalance bool `json:"rebalance,omitempty"` - - // A mapping between cluser names and preferences regarding local replicasets in these clusters. - // "*" (if provided) applies to all clusters if an explicit mapping is not provided. If there is no - // "*" that clusters without explicit preferences should not have any replicas scheduled. - Clusters map[string]ClusterReplicaSetPreferences `json:"clusters,omitempty"` -} - -// Preferences regarding number of replicas assigned to a cluster replicaset within a federated replicaset. -type ClusterReplicaSetPreferences struct { - // Minimum number of replicas that should be assigned to this Local ReplicaSet. 0 by default. - MinReplicas int64 `json:"minReplicas,omitempty"` - - // Maximum number of replicas that should be assigned to this Local ReplicaSet. Unbounded if no value provided (default). - MaxReplicas *int64 `json:"maxReplicas,omitempty"` - - // A number expressing the preference to put an additional replica to this LocalReplicaSet. 0 by default. - Weight int64 -} diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/generated.pb.go b/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/generated.pb.go deleted file mode 100644 index 247b085d9967..000000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/generated.pb.go +++ /dev/null @@ -1,1540 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/federation/apis/federation/v1beta1/generated.proto - - It has these top-level messages: - Cluster - ClusterCondition - ClusterList - ClusterSpec - ClusterStatus - ServerAddressByClientCIDR -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_kubernetes_pkg_api_v1 "k8s.io/client-go/1.4/pkg/api/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
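For context on the federation preferences types removed in the types.go hunk above: per the deleted doc comments, FederatedReplicaSetPreferences is meant to be attached to a federated ReplicaSet as a JSON-serialized annotation, with "*" acting as a catch-all key for clusters that have no explicit entry, and with the untagged Weight field serializing under the key "Weight" (which matches the generated codec hunk earlier). The following is a minimal standalone sketch, not part of the deleted file: it uses local mirror structs instead of the deleted package, and "cluster-a" is a hypothetical cluster name.

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the deleted federation types, declared here only for the example.
type ClusterReplicaSetPreferences struct {
	MinReplicas int64  `json:"minReplicas,omitempty"`
	MaxReplicas *int64 `json:"maxReplicas,omitempty"`
	Weight      int64  // untagged, so encoding/json uses the field name "Weight"
}

type FederatedReplicaSetPreferences struct {
	Rebalance bool                                    `json:"rebalance,omitempty"`
	Clusters  map[string]ClusterReplicaSetPreferences `json:"clusters,omitempty"`
}

func main() {
	max := int64(10)
	prefs := FederatedReplicaSetPreferences{
		Rebalance: true,
		Clusters: map[string]ClusterReplicaSetPreferences{
			"*":         {Weight: 1},                                   // applies to clusters without an explicit entry
			"cluster-a": {MinReplicas: 2, MaxReplicas: &max, Weight: 3}, // hypothetical cluster name
		},
	}
	b, err := json.Marshal(prefs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"rebalance":true,"clusters":{"*":{"Weight":1},"cluster-a":{"minReplicas":2,"maxReplicas":10,"Weight":3}}}
}
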
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 - -func (m *Cluster) Reset() { *m = Cluster{} } -func (*Cluster) ProtoMessage() {} -func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *ClusterCondition) Reset() { *m = ClusterCondition{} } -func (*ClusterCondition) ProtoMessage() {} -func (*ClusterCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ClusterList) Reset() { *m = ClusterList{} } -func (*ClusterList) ProtoMessage() {} -func (*ClusterList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *ClusterSpec) Reset() { *m = ClusterSpec{} } -func (*ClusterSpec) ProtoMessage() {} -func (*ClusterSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *ClusterStatus) Reset() { *m = ClusterStatus{} } -func (*ClusterStatus) ProtoMessage() {} -func (*ClusterStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } -func (*ServerAddressByClientCIDR) ProtoMessage() {} -func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} -} - -func init() { - proto.RegisterType((*Cluster)(nil), "k8s.io.client-go.1.4.federation.apis.federation.v1beta1.Cluster") - proto.RegisterType((*ClusterCondition)(nil), "k8s.io.client-go.1.4.federation.apis.federation.v1beta1.ClusterCondition") - proto.RegisterType((*ClusterList)(nil), "k8s.io.client-go.1.4.federation.apis.federation.v1beta1.ClusterList") - proto.RegisterType((*ClusterSpec)(nil), "k8s.io.client-go.1.4.federation.apis.federation.v1beta1.ClusterSpec") - proto.RegisterType((*ClusterStatus)(nil), "k8s.io.client-go.1.4.federation.apis.federation.v1beta1.ClusterStatus") - proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.client-go.1.4.federation.apis.federation.v1beta1.ServerAddressByClientCIDR") -} -func (m *Cluster) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Cluster) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *ClusterCondition) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterCondition) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Type))) - i += copy(data[i:], m.Type) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Status))) - i 
+= copy(data[i:], m.Status) - data[i] = 0x1a - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastProbeTime.Size())) - n4, err := m.LastProbeTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - data[i] = 0x22 - i++ - i = encodeVarintGenerated(data, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - data[i] = 0x2a - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Reason))) - i += copy(data[i:], m.Reason) - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) - return i, nil -} - -func (m *ClusterList) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterList) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ClusterSpec) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterSpec) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.SecretRef != nil { - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(m.SecretRef.Size())) - n7, err := m.SecretRef.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n7 - } - return i, nil -} - -func (m *ClusterStatus) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ClusterStatus) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Zones) > 0 { - for _, s := range m.Zones { - data[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - data[i] = uint8(l) - i++ - i += copy(data[i:], s) - } - } - data[i] = 0x32 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.Region))) - i += copy(data[i:], m.Region) - return i, nil -} - -func (m *ServerAddressByClientCIDR) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ServerAddressByClientCIDR) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ClientCIDR))) - i += 
copy(data[i:], m.ClientCIDR) - data[i] = 0x12 - i++ - i = encodeVarintGenerated(data, i, uint64(len(m.ServerAddress))) - i += copy(data[i:], m.ServerAddress) - return i, nil -} - -func encodeFixed64Generated(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Cluster) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterSpec) Size() (n int) { - var l int - _ = l - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, e := range m.ServerAddressByClientCIDRs { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ClusterStatus) Size() (n int) { - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Zones) > 0 { - for _, s := range m.Zones { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Region) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServerAddressByClientCIDR) Size() (n int) { - var l int - _ = l - l = len(m.ClientCIDR) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ServerAddress) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Cluster) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Cluster{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_kubernetes_pkg_api_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterSpec", "ClusterSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ClusterStatus", 
"ClusterStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_kubernetes_pkg_api_unversioned.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_kubernetes_pkg_api_unversioned.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Cluster", "Cluster", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterSpec{`, - `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "k8s_io_kubernetes_pkg_api_v1.LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ClusterCondition", "ClusterCondition", 1), `&`, ``, 1) + `,`, - `Zones:` + fmt.Sprintf("%v", this.Zones) + `,`, - `Region:` + fmt.Sprintf("%v", this.Region) + `,`, - `}`, - }, "") - return s -} -func (this *ServerAddressByClientCIDR) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServerAddressByClientCIDR{`, - `ClientCIDR:` + fmt.Sprintf("%v", this.ClientCIDR) + `,`, - `ServerAddress:` + fmt.Sprintf("%v", this.ServerAddress) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Cluster) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Cluster: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterCondition) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = ClusterConditionType(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - 
} - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_kubernetes_pkg_api_v1.ConditionStatus(data[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastProbeTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterList) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Cluster{}) - if err := m.Items[len(m.Items)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterSpec) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) - if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType 
= %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &k8s_io_kubernetes_pkg_api_v1.LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterStatus) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, ClusterCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Zones", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Zones = append(m.Zones, string(data[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - m.Region = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServerAddressByClientCIDR) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientCIDR = string(data[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServerAddress = string(data[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -var fileDescriptorGenerated = []byte{ - // 776 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x54, 0xdf, 0x6a, 0x13, 0x4d, - 0x14, 0x6f, 0xfe, 0x37, 0xd3, 0x2f, 0xdf, 0x57, 0x86, 0x4f, 0x88, 0xb9, 0x68, 0x24, 0x88, 0xb4, - 0x68, 0x77, 0x69, 0x50, 0x28, 0x88, 0x42, 0x37, 0x45, 0x28, 0xa4, 0x54, 0xa6, 0x45, 0xa4, 0x20, - 0xb2, 0xd9, 0x9c, 0xa6, 0x6b, 0x92, 0xdd, 0x30, 0x33, 0x5b, 0x69, 0xaf, 0x7c, 0x00, 0x2f, 0x7c, - 0x08, 0xdf, 0x40, 0x7c, 0x87, 0x5e, 0xf6, 0xc2, 0x0b, 0xf1, 0xa2, 0x68, 0x7d, 0x0b, 0xaf, 0x9c, - 0x99, 0x9d, 0x6c, 0x76, 0xbb, 0x4d, 0xd0, 0xf6, 0x62, 0x60, 0xcf, 0xd9, 0x73, 0x7e, 0xbf, 0xdf, - 0x9c, 0x73, 0xe6, 0xa0, 0xcd, 0xfe, 0x3a, 0x33, 0x5c, 0xdf, 0xec, 0x07, 0x1d, 0xa0, 0x1e, 0x70, - 0x60, 0xe6, 0x01, 0x74, 0x81, 0xda, 0xdc, 0xf5, 0x3d, 0xd3, 0x1e, 0xb9, 0x09, 0xfb, 0x68, 0xad, - 0x03, 0xdc, 0x5e, 0x33, 0x7b, 0xe0, 0x49, 0x17, 0x74, 0x8d, 0x11, 0xf5, 0xb9, 0x8f, 0x1f, 0x86, - 0x28, 0xc6, 0x04, 0xc5, 0x98, 0x64, 0x19, 0x12, 0x25, 0x6e, 0x6b, 0x94, 0xda, 0x6a, 0xcf, 0xe5, - 0x87, 0x41, 0xc7, 0x70, 0xfc, 0xa1, 0xd9, 0xf3, 0x7b, 0xbe, 0xa9, 0xc0, 0x3a, 0xc1, 0x81, 0xb2, - 0x94, 0xa1, 0xbe, 0x42, 0x92, 0x5a, 0x33, 0x2d, 0x75, 0xd4, 0xef, 0x49, 0x8d, 0x26, 0x05, 0xe6, - 0x07, 0xd4, 0x81, 0xcb, 0xc2, 0x6a, 0x8f, 0xa6, 0xe7, 0x04, 0xde, 0x11, 0x50, 0x26, 0xf4, 0x40, - 0x37, 0x95, 0xf6, 0x60, 0x7a, 0xda, 0x51, 0xea, 0xf6, 0xb5, 0xd5, 0xab, 0xa3, 0x69, 0xe0, 0x71, - 0x77, 0x98, 0xd6, 0xb4, 0x76, 0x75, 0x78, 0xc0, 0xdd, 0x81, 0xe9, 0x7a, 0x9c, 0x71, 0x7a, 0x39, - 0xa5, 0xf1, 0x39, 0x8b, 0x4a, 0xad, 0x41, 0xc0, 0x38, 0x50, 0xfc, 0x12, 0xcd, 0x0f, 0x45, 0xf9, - 0xba, 0x36, 0xb7, 0xab, 0x99, 0x3b, 0x99, 0xe5, 0x85, 0xe6, 0xb2, 0x91, 0x2e, 0xbf, 0x40, 0x94, - 0x75, 0x17, 0xb5, 0x36, 0x76, 0x3a, 0x6f, 0xc0, 0xe1, 0xdb, 0x22, 0xc7, 0xc2, 0xa7, 0xe7, 0xf5, - 0xb9, 0x8b, 0xf3, 0x3a, 0x9a, 0xf8, 0x48, 0x84, 0x86, 0x1d, 0x94, 0x67, 0x23, 0x70, 0xaa, 0x59, - 0x85, 0xba, 0x61, 0x5c, 0xa7, 0xa9, 0x86, 0x96, 0xb9, 0x2b, 0x80, 0xac, 0x7f, 0x34, 0x5d, 0x5e, - 0x5a, 0x44, 0x81, 0xe3, 0x3e, 0x2a, 0x32, 0x6e, 0xf3, 0x80, 0x55, 0x73, 0x8a, 0xa6, 0x75, 0x33, - 0x1a, 0x05, 0x65, 0xfd, 0xab, 0x89, 0x8a, 0xa1, 0x4d, 0x34, 0x45, 0xe3, 0x5b, 0x0e, 0x2d, 0xea, - 0xc8, 0x96, 0xef, 0x75, 0x5d, 0x09, 0x81, 0xd7, 0x51, 0x9e, 0x1f, 0x8f, 0x40, 0x15, 0xaf, 0x6c, - 0xdd, 0x1d, 0x6b, 0xdc, 0x13, 0xbe, 0x5f, 0xe7, 
0xf5, 0xff, 0x2f, 0xc7, 0x4b, 0x3f, 0x51, 0x19, - 0xf8, 0x45, 0xa4, 0x3d, 0xab, 0x72, 0x9f, 0x26, 0x69, 0x45, 0xf6, 0xcc, 0xc1, 0x31, 0x22, 0xcc, - 0xa4, 0x4c, 0x7c, 0x88, 0x2a, 0x03, 0x9b, 0xf1, 0xe7, 0xd4, 0xef, 0xc0, 0x9e, 0x18, 0x19, 0x5d, - 0x9a, 0xfb, 0x33, 0xfa, 0x1a, 0x9b, 0x5e, 0x43, 0xa6, 0x58, 0xb7, 0xb4, 0x96, 0x4a, 0x3b, 0x8e, - 0x44, 0x92, 0xc0, 0xf8, 0x2d, 0xc2, 0xd2, 0xb1, 0x47, 0x6d, 0x8f, 0x85, 0xb7, 0x93, 0x74, 0xf9, - 0xbf, 0xa7, 0xab, 0x69, 0x3a, 0xdc, 0x4e, 0xc1, 0x91, 0x2b, 0x28, 0xf0, 0x3d, 0x54, 0xa4, 0x60, - 0x33, 0xdf, 0xab, 0x16, 0x54, 0xe9, 0xa2, 0x8e, 0x11, 0xe5, 0x25, 0xfa, 0x2f, 0x5e, 0x41, 0xa5, - 0x21, 0x30, 0x66, 0xf7, 0xa0, 0x5a, 0x54, 0x81, 0xff, 0xe9, 0xc0, 0xd2, 0x76, 0xe8, 0x26, 0xe3, - 0xff, 0x8d, 0xb3, 0x0c, 0x5a, 0xd0, 0xcd, 0x6a, 0xbb, 0x8c, 0xe3, 0x57, 0xa9, 0x87, 0x61, 0xfe, - 0xe1, 0x8d, 0x64, 0xba, 0x7a, 0x1f, 0x8b, 0x9a, 0x6c, 0x7e, 0xec, 0x89, 0xbd, 0x8e, 0x0e, 0x2a, - 0xb8, 0x1c, 0x86, 0xb2, 0xf7, 0x39, 0x81, 0xfd, 0xe4, 0x46, 0x73, 0x6b, 0x55, 0x34, 0x53, 0x61, - 0x4b, 0x62, 0x92, 0x10, 0xba, 0xf1, 0x31, 0x1b, 0x5d, 0x49, 0x3e, 0x19, 0xfc, 0x29, 0x83, 0x6a, - 0x0c, 0xa8, 0x50, 0xba, 0xd1, 0xed, 0x8a, 0x25, 0xc7, 0xac, 0xe3, 0xd6, 0xc0, 0x05, 0x8f, 0xb7, - 0xb6, 0x36, 0x09, 0x13, 0xb7, 0x94, 0x4a, 0x76, 0xae, 0xa7, 0x64, 0x77, 0x1a, 0xae, 0xd5, 0xd0, - 0xda, 0x6a, 0x53, 0x43, 0x18, 0x99, 0x21, 0x0b, 0xbf, 0x46, 0x65, 0x06, 0x0e, 0x05, 0x4e, 0xe0, - 0x40, 0x2f, 0x93, 0xe6, 0xec, 0x15, 0xd5, 0xf6, 0x1d, 0x7b, 0x10, 0xee, 0x24, 0x91, 0x03, 0x14, - 0x3c, 0x07, 0xac, 0x8a, 0x90, 0x50, 0xde, 0x1d, 0x03, 0x91, 0x09, 0x66, 0xe3, 0x4b, 0x06, 0x55, - 0x12, 0x0b, 0x00, 0x9f, 0x20, 0xe4, 0x8c, 0x1f, 0xd7, 0xb8, 0x2e, 0xcf, 0x6e, 0xd4, 0xa1, 0xe8, - 0xad, 0x4e, 0x96, 0x66, 0xe4, 0x62, 0x24, 0xc6, 0x86, 0xeb, 0xa8, 0x70, 0x22, 0xc6, 0x88, 0x89, - 0xc9, 0xce, 0x89, 0x81, 0x2d, 0xcb, 0xae, 0xee, 0x4b, 0x07, 0x09, 0xfd, 0xe1, 0xec, 0xf7, 0x44, - 0xac, 0x1e, 0xe9, 0xd8, 0xec, 0x4b, 0x2f, 0xd1, 0x7f, 0x1b, 0xef, 0x33, 0xe8, 0xf6, 0xd4, 0x92, - 0xe3, 0xa6, 0xb8, 0x62, 0x64, 0xe9, 0xe5, 0x35, 0x91, 0x16, 0xfd, 0x21, 0xb1, 0x28, 0xfc, 0x18, - 0x55, 0x12, 0x7d, 0xd2, 0x7b, 0x2b, 0xda, 0x15, 0x09, 0x36, 0x92, 0x8c, 0xb5, 0x56, 0x4e, 0x7f, - 0x2c, 0xcd, 0x9d, 0x89, 0xf3, 0x55, 0x9c, 0x77, 0x17, 0x4b, 0x99, 0x53, 0x71, 0xce, 0xc4, 0xf9, - 0x2e, 0xce, 0x87, 0x9f, 0x4b, 0x73, 0xfb, 0x25, 0x5d, 0xb3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x48, 0x5d, 0x6b, 0x0c, 0x46, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/generated.proto b/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/generated.proto deleted file mode 100644 index 2eef53fe7e43..000000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/generated.proto +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
- -syntax = 'proto2'; - -package k8s.io.kubernetes.federation.apis.federation.v1beta1; - -import "k8s.io/kubernetes/pkg/api/resource/generated.proto"; -import "k8s.io/kubernetes/pkg/api/unversioned/generated.proto"; -import "k8s.io/kubernetes/pkg/api/v1/generated.proto"; -import "k8s.io/kubernetes/pkg/runtime/generated.proto"; -import "k8s.io/kubernetes/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. -message Cluster { - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - optional k8s.io.kubernetes.pkg.api.v1.ObjectMeta metadata = 1; - - // Spec defines the behavior of the Cluster. - optional ClusterSpec spec = 2; - - // Status describes the current status of a Cluster - optional ClusterStatus status = 3; -} - -// ClusterCondition describes current state of a cluster. -message ClusterCondition { - // Type of cluster condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - optional k8s.io.kubernetes.pkg.api.unversioned.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - optional string reason = 5; - - // Human readable message indicating details about last transition. - optional string message = 6; -} - -// A list of all the kubernetes clusters registered to the federation -message ClusterList { - // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - optional k8s.io.kubernetes.pkg.api.unversioned.ListMeta metadata = 1; - - // List of Cluster objects. - repeated Cluster items = 2; -} - -// ClusterSpec describes the attributes of a kubernetes cluster. -message ClusterSpec { - // A map of client CIDR to server address. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 1; - - // Name of the secret containing kubeconfig to access this cluster. - // The secret is read from the kubernetes cluster that is hosting federation control plane. - // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". - // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. - // This can be left empty if the cluster allows insecure access. - optional k8s.io.kubernetes.pkg.api.v1.LocalObjectReference secretRef = 2; -} - -// ClusterStatus is information about the current status of a cluster updated by cluster controller peridocally. -message ClusterStatus { - // Conditions is an array of current cluster conditions. - repeated ClusterCondition conditions = 1; - - // Zones is the list of avaliability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. 
- // These will always be in the same region. - repeated string zones = 5; - - // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. - optional string region = 6; -} - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -message ServerAddressByClientCIDR { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - optional string clientCIDR = 1; - - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - optional string serverAddress = 2; -} - diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/types.generated.go b/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/types.generated.go deleted file mode 100644 index 4352c3c37cab..000000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/types.generated.go +++ /dev/null @@ -1,2334 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package v1beta1 - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - pkg2_unversioned "k8s.io/client-go/1.4/pkg/api/unversioned" - pkg1_v1 "k8s.io/client-go/1.4/pkg/api/v1" - pkg3_types "k8s.io/client-go/1.4/pkg/types" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81234 = 1 - codecSelferC_RAW1234 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1234 = 10 - codecSelferValueTypeMap1234 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1234 = 2 - codecSelfer_containerMapValue1234 = 3 - codecSelfer_containerMapEnd1234 = 4 - codecSelfer_containerArrayElem1234 = 6 - codecSelfer_containerArrayEnd1234 = 7 -) - -var ( - codecSelferBitsize1234 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1234 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1234 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 pkg2_unversioned.Time - var v1 pkg1_v1.LocalObjectReference - var v2 pkg3_types.UID - var v3 time.Time - _, _, _, _ = v0, v1, v2, v3 - } -} - -func (x *ServerAddressByClientCIDR) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [2]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(2) - } else { - yynn2 = 2 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("clientCIDR")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ClientCIDR)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym7 := z.EncBinary() - _ = yym7 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serverAddress")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym8 := z.EncBinary() - _ = yym8 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.ServerAddress)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ServerAddressByClientCIDR) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym9 := z.DecBinary() - _ = yym9 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct10 := r.ContainerType() - if yyct10 == codecSelferValueTypeMap1234 { - yyl10 := r.ReadMapStart() - if yyl10 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl10, d) - } - } else if yyct10 == codecSelferValueTypeArray1234 { - yyl10 := r.ReadArrayStart() - if yyl10 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl10, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ServerAddressByClientCIDR) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys11Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys11Slc - var yyhl11 bool = l >= 0 - for yyj11 := 0; ; yyj11++ { - if yyhl11 { - if yyj11 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys11Slc = r.DecodeBytes(yys11Slc, true, true) - yys11 := string(yys11Slc) - 
z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys11 { - case "clientCIDR": - if r.TryDecodeAsNil() { - x.ClientCIDR = "" - } else { - x.ClientCIDR = string(r.DecodeString()) - } - case "serverAddress": - if r.TryDecodeAsNil() { - x.ServerAddress = "" - } else { - x.ServerAddress = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys11) - } // end switch yys11 - } // end for yyj11 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ServerAddressByClientCIDR) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ClientCIDR = "" - } else { - x.ClientCIDR = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServerAddress = "" - } else { - x.ServerAddress = string(r.DecodeString()) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterSpec) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym17 := z.EncBinary() - _ = yym17 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep18 := !z.EncBinary() - yy2arr18 := z.EncBasicHandle().StructToArray - var yyq18 [2]bool - _, _, _ = yysep18, yyq18, yy2arr18 - const yyr18 bool = false - yyq18[1] = x.SecretRef != nil - var yynn18 int - if yyr18 || yy2arr18 { - r.EncodeArrayStart(2) - } else { - yynn18 = 1 - for _, b := range yyq18 { - if b { - yynn18++ - } - } - r.EncodeMapStart(yynn18) - yynn18 = 0 - } - if yyr18 || yy2arr18 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.ServerAddressByClientCIDRs == nil { - r.EncodeNil() - } else { - yym20 := z.EncBinary() - _ = yym20 - if false { - } else { - h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("serverAddressByClientCIDRs")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.ServerAddressByClientCIDRs == nil { - r.EncodeNil() - } else { - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - h.encSliceServerAddressByClientCIDR(([]ServerAddressByClientCIDR)(x.ServerAddressByClientCIDRs), e) - } - } - } - if yyr18 || yy2arr18 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq18[1] { - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } else { - r.EncodeNil() - } - } else { - if yyq18[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("secretRef")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.SecretRef == nil { - r.EncodeNil() - } else { - x.SecretRef.CodecEncodeSelf(e) - } - } - } - if yyr18 || yy2arr18 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterSpec) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym23 := z.DecBinary() - _ = yym23 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct24 := r.ContainerType() - if yyct24 == codecSelferValueTypeMap1234 { - yyl24 := r.ReadMapStart() - if yyl24 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl24, d) - } - } else if yyct24 == codecSelferValueTypeArray1234 { - yyl24 := r.ReadArrayStart() - if yyl24 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl24, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterSpec) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys25Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys25Slc - var yyhl25 bool = l >= 0 - for yyj25 := 0; ; yyj25++ { - if yyhl25 { - if yyj25 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys25Slc = r.DecodeBytes(yys25Slc, true, true) - yys25 := string(yys25Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys25 { - case "serverAddressByClientCIDRs": - if r.TryDecodeAsNil() { - x.ServerAddressByClientCIDRs = nil - } else { - yyv26 := &x.ServerAddressByClientCIDRs - yym27 := z.DecBinary() - _ = yym27 - if false { - } else { - h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv26), d) - } - } - case "secretRef": - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(pkg1_v1.LocalObjectReference) - } - x.SecretRef.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys25) - } // end switch yys25 - } // end for yyj25 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterSpec) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj29 int - var yyb29 bool - var yyhl29 bool = l >= 0 - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ServerAddressByClientCIDRs = nil - } else { - yyv30 := &x.ServerAddressByClientCIDRs - yym31 := z.DecBinary() - _ = yym31 - if false { - } else { - h.decSliceServerAddressByClientCIDR((*[]ServerAddressByClientCIDR)(yyv30), d) - } - } - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - if x.SecretRef != nil { - x.SecretRef = nil - } - } else { - if x.SecretRef == nil { - x.SecretRef = new(pkg1_v1.LocalObjectReference) - } - 
x.SecretRef.CodecDecodeSelf(d) - } - for { - yyj29++ - if yyhl29 { - yyb29 = yyj29 > l - } else { - yyb29 = r.CheckBreak() - } - if yyb29 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj29-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x ClusterConditionType) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - yym33 := z.EncBinary() - _ = yym33 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x)) - } -} - -func (x *ClusterConditionType) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym34 := z.DecBinary() - _ = yym34 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - *((*string)(x)) = r.DecodeString() - } -} - -func (x *ClusterCondition) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym35 := z.EncBinary() - _ = yym35 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep36 := !z.EncBinary() - yy2arr36 := z.EncBasicHandle().StructToArray - var yyq36 [6]bool - _, _, _ = yysep36, yyq36, yy2arr36 - const yyr36 bool = false - yyq36[2] = true - yyq36[3] = true - yyq36[4] = x.Reason != "" - yyq36[5] = x.Message != "" - var yynn36 int - if yyr36 || yy2arr36 { - r.EncodeArrayStart(6) - } else { - yynn36 = 2 - for _, b := range yyq36 { - if b { - yynn36++ - } - } - r.EncodeMapStart(yynn36) - yynn36 = 0 - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - x.Type.CodecEncodeSelf(e) - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("type")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - x.Type.CodecEncodeSelf(e) - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yym39 := z.EncBinary() - _ = yym39 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym40 := z.EncBinary() - _ = yym40 - if false { - } else if z.HasExtensions() && z.EncExt(x.Status) { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Status)) - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[2] { - yy42 := &x.LastProbeTime - yym43 := z.EncBinary() - _ = yym43 - if false { - } else if z.HasExtensions() && z.EncExt(yy42) { - } else if yym43 { - z.EncBinaryMarshal(yy42) - } else if !yym43 && z.IsJSONHandle() { - z.EncJSONMarshal(yy42) - } else { - z.EncFallback(yy42) - } - } else { - r.EncodeNil() - } - } else { - if yyq36[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastProbeTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy44 := &x.LastProbeTime - yym45 := z.EncBinary() - _ = yym45 - if false { - } else if z.HasExtensions() && z.EncExt(yy44) { - } else if yym45 { - z.EncBinaryMarshal(yy44) - } else if !yym45 && z.IsJSONHandle() { - z.EncJSONMarshal(yy44) - } else { - 
z.EncFallback(yy44) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[3] { - yy47 := &x.LastTransitionTime - yym48 := z.EncBinary() - _ = yym48 - if false { - } else if z.HasExtensions() && z.EncExt(yy47) { - } else if yym48 { - z.EncBinaryMarshal(yy47) - } else if !yym48 && z.IsJSONHandle() { - z.EncJSONMarshal(yy47) - } else { - z.EncFallback(yy47) - } - } else { - r.EncodeNil() - } - } else { - if yyq36[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("lastTransitionTime")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy49 := &x.LastTransitionTime - yym50 := z.EncBinary() - _ = yym50 - if false { - } else if z.HasExtensions() && z.EncExt(yy49) { - } else if yym50 { - z.EncBinaryMarshal(yy49) - } else if !yym50 && z.IsJSONHandle() { - z.EncJSONMarshal(yy49) - } else { - z.EncFallback(yy49) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[4] { - yym52 := z.EncBinary() - _ = yym52 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq36[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("reason")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym53 := z.EncBinary() - _ = yym53 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Reason)) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq36[5] { - yym55 := z.EncBinary() - _ = yym55 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq36[5] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("message")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym56 := z.EncBinary() - _ = yym56 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Message)) - } - } - } - if yyr36 || yy2arr36 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterCondition) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym57 := z.DecBinary() - _ = yym57 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct58 := r.ContainerType() - if yyct58 == codecSelferValueTypeMap1234 { - yyl58 := r.ReadMapStart() - if yyl58 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl58, d) - } - } else if yyct58 == codecSelferValueTypeArray1234 { - yyl58 := r.ReadArrayStart() - if yyl58 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl58, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterCondition) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys59Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys59Slc - var yyhl59 bool = l >= 0 - for yyj59 := 0; ; yyj59++ { - if yyhl59 { - if yyj59 >= l { - break - } - } else { - if r.CheckBreak() { - break 
- } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys59Slc = r.DecodeBytes(yys59Slc, true, true) - yys59 := string(yys59Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys59 { - case "type": - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ClusterConditionType(r.DecodeString()) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg1_v1.ConditionStatus(r.DecodeString()) - } - case "lastProbeTime": - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv62 := &x.LastProbeTime - yym63 := z.DecBinary() - _ = yym63 - if false { - } else if z.HasExtensions() && z.DecExt(yyv62) { - } else if yym63 { - z.DecBinaryUnmarshal(yyv62) - } else if !yym63 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv62) - } else { - z.DecFallback(yyv62, false) - } - } - case "lastTransitionTime": - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv64 := &x.LastTransitionTime - yym65 := z.DecBinary() - _ = yym65 - if false { - } else if z.HasExtensions() && z.DecExt(yyv64) { - } else if yym65 { - z.DecBinaryUnmarshal(yyv64) - } else if !yym65 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv64) - } else { - z.DecFallback(yyv64, false) - } - } - case "reason": - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - case "message": - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys59) - } // end switch yys59 - } // end for yyj59 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterCondition) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj68 int - var yyb68 bool - var yyhl68 bool = l >= 0 - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Type = "" - } else { - x.Type = ClusterConditionType(r.DecodeString()) - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = "" - } else { - x.Status = pkg1_v1.ConditionStatus(r.DecodeString()) - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastProbeTime = pkg2_unversioned.Time{} - } else { - yyv71 := &x.LastProbeTime - yym72 := z.DecBinary() - _ = yym72 - if false { - } else if z.HasExtensions() && z.DecExt(yyv71) { - } else if yym72 { - z.DecBinaryUnmarshal(yyv71) - } else if !yym72 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv71) - } else { - z.DecFallback(yyv71, false) - } - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.LastTransitionTime = pkg2_unversioned.Time{} - } else { - yyv73 := &x.LastTransitionTime - 
yym74 := z.DecBinary() - _ = yym74 - if false { - } else if z.HasExtensions() && z.DecExt(yyv73) { - } else if yym74 { - z.DecBinaryUnmarshal(yyv73) - } else if !yym74 && z.IsJSONHandle() { - z.DecJSONUnmarshal(yyv73) - } else { - z.DecFallback(yyv73, false) - } - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Reason = "" - } else { - x.Reason = string(r.DecodeString()) - } - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Message = "" - } else { - x.Message = string(r.DecodeString()) - } - for { - yyj68++ - if yyhl68 { - yyb68 = yyj68 > l - } else { - yyb68 = r.CheckBreak() - } - if yyb68 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj68-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterStatus) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym77 := z.EncBinary() - _ = yym77 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep78 := !z.EncBinary() - yy2arr78 := z.EncBasicHandle().StructToArray - var yyq78 [3]bool - _, _, _ = yysep78, yyq78, yy2arr78 - const yyr78 bool = false - yyq78[0] = len(x.Conditions) != 0 - yyq78[1] = len(x.Zones) != 0 - yyq78[2] = x.Region != "" - var yynn78 int - if yyr78 || yy2arr78 { - r.EncodeArrayStart(3) - } else { - yynn78 = 0 - for _, b := range yyq78 { - if b { - yynn78++ - } - } - r.EncodeMapStart(yynn78) - yynn78 = 0 - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq78[0] { - if x.Conditions == nil { - r.EncodeNil() - } else { - yym80 := z.EncBinary() - _ = yym80 - if false { - } else { - h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq78[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("conditions")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Conditions == nil { - r.EncodeNil() - } else { - yym81 := z.EncBinary() - _ = yym81 - if false { - } else { - h.encSliceClusterCondition(([]ClusterCondition)(x.Conditions), e) - } - } - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq78[1] { - if x.Zones == nil { - r.EncodeNil() - } else { - yym83 := z.EncBinary() - _ = yym83 - if false { - } else { - z.F.EncSliceStringV(x.Zones, false, e) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq78[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("zones")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Zones == nil { - r.EncodeNil() - } else { - yym84 := z.EncBinary() - _ = yym84 - if false { - } else { - z.F.EncSliceStringV(x.Zones, false, e) - } - } - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq78[2] { - yym86 := z.EncBinary() - _ = yym86 - if false { - } else { - 
r.EncodeString(codecSelferC_UTF81234, string(x.Region)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq78[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("region")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym87 := z.EncBinary() - _ = yym87 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Region)) - } - } - } - if yyr78 || yy2arr78 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterStatus) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym88 := z.DecBinary() - _ = yym88 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct89 := r.ContainerType() - if yyct89 == codecSelferValueTypeMap1234 { - yyl89 := r.ReadMapStart() - if yyl89 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl89, d) - } - } else if yyct89 == codecSelferValueTypeArray1234 { - yyl89 := r.ReadArrayStart() - if yyl89 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl89, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterStatus) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys90Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys90Slc - var yyhl90 bool = l >= 0 - for yyj90 := 0; ; yyj90++ { - if yyhl90 { - if yyj90 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys90Slc = r.DecodeBytes(yys90Slc, true, true) - yys90 := string(yys90Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys90 { - case "conditions": - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv91 := &x.Conditions - yym92 := z.DecBinary() - _ = yym92 - if false { - } else { - h.decSliceClusterCondition((*[]ClusterCondition)(yyv91), d) - } - } - case "zones": - if r.TryDecodeAsNil() { - x.Zones = nil - } else { - yyv93 := &x.Zones - yym94 := z.DecBinary() - _ = yym94 - if false { - } else { - z.F.DecSliceStringX(yyv93, false, d) - } - } - case "region": - if r.TryDecodeAsNil() { - x.Region = "" - } else { - x.Region = string(r.DecodeString()) - } - default: - z.DecStructFieldNotFound(-1, yys90) - } // end switch yys90 - } // end for yyj90 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterStatus) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj96 int - var yyb96 bool - var yyhl96 bool = l >= 0 - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Conditions = nil - } else { - yyv97 := &x.Conditions - yym98 := z.DecBinary() - _ = yym98 - if false { - } else { - h.decSliceClusterCondition((*[]ClusterCondition)(yyv97), d) - } - } - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - 
z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Zones = nil - } else { - yyv99 := &x.Zones - yym100 := z.DecBinary() - _ = yym100 - if false { - } else { - z.F.DecSliceStringX(yyv99, false, d) - } - } - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Region = "" - } else { - x.Region = string(r.DecodeString()) - } - for { - yyj96++ - if yyhl96 { - yyb96 = yyj96 > l - } else { - yyb96 = r.CheckBreak() - } - if yyb96 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj96-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *Cluster) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym102 := z.EncBinary() - _ = yym102 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep103 := !z.EncBinary() - yy2arr103 := z.EncBasicHandle().StructToArray - var yyq103 [5]bool - _, _, _ = yysep103, yyq103, yy2arr103 - const yyr103 bool = false - yyq103[0] = x.Kind != "" - yyq103[1] = x.APIVersion != "" - yyq103[2] = true - yyq103[3] = true - yyq103[4] = true - var yynn103 int - if yyr103 || yy2arr103 { - r.EncodeArrayStart(5) - } else { - yynn103 = 0 - for _, b := range yyq103 { - if b { - yynn103++ - } - } - r.EncodeMapStart(yynn103) - yynn103 = 0 - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[0] { - yym105 := z.EncBinary() - _ = yym105 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq103[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym106 := z.EncBinary() - _ = yym106 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[1] { - yym108 := z.EncBinary() - _ = yym108 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq103[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym109 := z.EncBinary() - _ = yym109 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[2] { - yy111 := &x.ObjectMeta - yy111.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq103[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy112 := &x.ObjectMeta - yy112.CodecEncodeSelf(e) - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[3] { - yy114 := 
&x.Spec - yy114.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq103[3] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("spec")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy115 := &x.Spec - yy115.CodecEncodeSelf(e) - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq103[4] { - yy117 := &x.Status - yy117.CodecEncodeSelf(e) - } else { - r.EncodeNil() - } - } else { - if yyq103[4] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("status")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy118 := &x.Status - yy118.CodecEncodeSelf(e) - } - } - if yyr103 || yy2arr103 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *Cluster) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym119 := z.DecBinary() - _ = yym119 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct120 := r.ContainerType() - if yyct120 == codecSelferValueTypeMap1234 { - yyl120 := r.ReadMapStart() - if yyl120 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl120, d) - } - } else if yyct120 == codecSelferValueTypeArray1234 { - yyl120 := r.ReadArrayStart() - if yyl120 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl120, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *Cluster) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys121Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys121Slc - var yyhl121 bool = l >= 0 - for yyj121 := 0; ; yyj121++ { - if yyhl121 { - if yyj121 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys121Slc = r.DecodeBytes(yys121Slc, true, true) - yys121 := string(yys121Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys121 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv124 := &x.ObjectMeta - yyv124.CodecDecodeSelf(d) - } - case "spec": - if r.TryDecodeAsNil() { - x.Spec = ClusterSpec{} - } else { - yyv125 := &x.Spec - yyv125.CodecDecodeSelf(d) - } - case "status": - if r.TryDecodeAsNil() { - x.Status = ClusterStatus{} - } else { - yyv126 := &x.Status - yyv126.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys121) - } // end switch yys121 - } // end for yyj121 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *Cluster) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj127 int - var yyb127 bool - var yyhl127 bool = l >= 0 - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - 
return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ObjectMeta = pkg1_v1.ObjectMeta{} - } else { - yyv130 := &x.ObjectMeta - yyv130.CodecDecodeSelf(d) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Spec = ClusterSpec{} - } else { - yyv131 := &x.Spec - yyv131.CodecDecodeSelf(d) - } - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Status = ClusterStatus{} - } else { - yyv132 := &x.Status - yyv132.CodecDecodeSelf(d) - } - for { - yyj127++ - if yyhl127 { - yyb127 = yyj127 > l - } else { - yyb127 = r.CheckBreak() - } - if yyb127 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj127-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x *ClusterList) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym133 := z.EncBinary() - _ = yym133 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep134 := !z.EncBinary() - yy2arr134 := z.EncBasicHandle().StructToArray - var yyq134 [4]bool - _, _, _ = yysep134, yyq134, yy2arr134 - const yyr134 bool = false - yyq134[0] = x.Kind != "" - yyq134[1] = x.APIVersion != "" - yyq134[2] = true - var yynn134 int - if yyr134 || yy2arr134 { - r.EncodeArrayStart(4) - } else { - yynn134 = 1 - for _, b := range yyq134 { - if b { - yynn134++ - } - } - r.EncodeMapStart(yynn134) - yynn134 = 0 - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq134[0] { - yym136 := z.EncBinary() - _ = yym136 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else { - if yyq134[0] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("kind")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym137 := z.EncBinary() - _ = yym137 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.Kind)) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq134[1] { - yym139 := z.EncBinary() - _ = yym139 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } else { - r.EncodeString(codecSelferC_UTF81234, "") - } - } else 
{ - if yyq134[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("apiVersion")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yym140 := z.EncBinary() - _ = yym140 - if false { - } else { - r.EncodeString(codecSelferC_UTF81234, string(x.APIVersion)) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if yyq134[2] { - yy142 := &x.ListMeta - yym143 := z.EncBinary() - _ = yym143 - if false { - } else if z.HasExtensions() && z.EncExt(yy142) { - } else { - z.EncFallback(yy142) - } - } else { - r.EncodeNil() - } - } else { - if yyq134[2] { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("metadata")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - yy144 := &x.ListMeta - yym145 := z.EncBinary() - _ = yym145 - if false { - } else if z.HasExtensions() && z.EncExt(yy144) { - } else { - z.EncFallback(yy144) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym147 := z.EncBinary() - _ = yym147 - if false { - } else { - h.encSliceCluster(([]Cluster)(x.Items), e) - } - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1234) - r.EncodeString(codecSelferC_UTF81234, string("items")) - z.EncSendContainerState(codecSelfer_containerMapValue1234) - if x.Items == nil { - r.EncodeNil() - } else { - yym148 := z.EncBinary() - _ = yym148 - if false { - } else { - h.encSliceCluster(([]Cluster)(x.Items), e) - } - } - } - if yyr134 || yy2arr134 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1234) - } - } - } -} - -func (x *ClusterList) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym149 := z.DecBinary() - _ = yym149 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct150 := r.ContainerType() - if yyct150 == codecSelferValueTypeMap1234 { - yyl150 := r.ReadMapStart() - if yyl150 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1234) - } else { - x.codecDecodeSelfFromMap(yyl150, d) - } - } else if yyct150 == codecSelferValueTypeArray1234 { - yyl150 := r.ReadArrayStart() - if yyl150 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - } else { - x.codecDecodeSelfFromArray(yyl150, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234) - } - } -} - -func (x *ClusterList) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys151Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys151Slc - var yyhl151 bool = l >= 0 - for yyj151 := 0; ; yyj151++ { - if yyhl151 { - if yyj151 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1234) - yys151Slc = r.DecodeBytes(yys151Slc, true, true) - yys151 := string(yys151Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1234) - switch yys151 { - case "kind": - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - case "apiVersion": - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - case "metadata": - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - 
yyv154 := &x.ListMeta - yym155 := z.DecBinary() - _ = yym155 - if false { - } else if z.HasExtensions() && z.DecExt(yyv154) { - } else { - z.DecFallback(yyv154, false) - } - } - case "items": - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv156 := &x.Items - yym157 := z.DecBinary() - _ = yym157 - if false { - } else { - h.decSliceCluster((*[]Cluster)(yyv156), d) - } - } - default: - z.DecStructFieldNotFound(-1, yys151) - } // end switch yys151 - } // end for yyj151 - z.DecSendContainerState(codecSelfer_containerMapEnd1234) -} - -func (x *ClusterList) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj158 int - var yyb158 bool - var yyhl158 bool = l >= 0 - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Kind = "" - } else { - x.Kind = string(r.DecodeString()) - } - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.APIVersion = "" - } else { - x.APIVersion = string(r.DecodeString()) - } - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.ListMeta = pkg2_unversioned.ListMeta{} - } else { - yyv161 := &x.ListMeta - yym162 := z.DecBinary() - _ = yym162 - if false { - } else if z.HasExtensions() && z.DecExt(yyv161) { - } else { - z.DecFallback(yyv161, false) - } - } - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - if r.TryDecodeAsNil() { - x.Items = nil - } else { - yyv163 := &x.Items - yym164 := z.DecBinary() - _ = yym164 - if false { - } else { - h.decSliceCluster((*[]Cluster)(yyv163), d) - } - } - for { - yyj158++ - if yyhl158 { - yyb158 = yyj158 > l - } else { - yyb158 = r.CheckBreak() - } - if yyb158 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1234) - z.DecStructFieldNotFound(yyj158-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) encSliceServerAddressByClientCIDR(v []ServerAddressByClientCIDR, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv165 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy166 := &yyv165 - yy166.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceServerAddressByClientCIDR(v *[]ServerAddressByClientCIDR, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv167 := *v - yyh167, yyl167 := z.DecSliceHelperStart() - var yyc167 bool - if yyl167 == 0 { - if yyv167 == nil { - yyv167 = []ServerAddressByClientCIDR{} - yyc167 = true - } else if len(yyv167) != 0 { - yyv167 = yyv167[:0] - yyc167 = true - } - } 
else if yyl167 > 0 { - var yyrr167, yyrl167 int - var yyrt167 bool - if yyl167 > cap(yyv167) { - - yyrg167 := len(yyv167) > 0 - yyv2167 := yyv167 - yyrl167, yyrt167 = z.DecInferLen(yyl167, z.DecBasicHandle().MaxInitLen, 32) - if yyrt167 { - if yyrl167 <= cap(yyv167) { - yyv167 = yyv167[:yyrl167] - } else { - yyv167 = make([]ServerAddressByClientCIDR, yyrl167) - } - } else { - yyv167 = make([]ServerAddressByClientCIDR, yyrl167) - } - yyc167 = true - yyrr167 = len(yyv167) - if yyrg167 { - copy(yyv167, yyv2167) - } - } else if yyl167 != len(yyv167) { - yyv167 = yyv167[:yyl167] - yyc167 = true - } - yyj167 := 0 - for ; yyj167 < yyrr167; yyj167++ { - yyh167.ElemContainerState(yyj167) - if r.TryDecodeAsNil() { - yyv167[yyj167] = ServerAddressByClientCIDR{} - } else { - yyv168 := &yyv167[yyj167] - yyv168.CodecDecodeSelf(d) - } - - } - if yyrt167 { - for ; yyj167 < yyl167; yyj167++ { - yyv167 = append(yyv167, ServerAddressByClientCIDR{}) - yyh167.ElemContainerState(yyj167) - if r.TryDecodeAsNil() { - yyv167[yyj167] = ServerAddressByClientCIDR{} - } else { - yyv169 := &yyv167[yyj167] - yyv169.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj167 := 0 - for ; !r.CheckBreak(); yyj167++ { - - if yyj167 >= len(yyv167) { - yyv167 = append(yyv167, ServerAddressByClientCIDR{}) // var yyz167 ServerAddressByClientCIDR - yyc167 = true - } - yyh167.ElemContainerState(yyj167) - if yyj167 < len(yyv167) { - if r.TryDecodeAsNil() { - yyv167[yyj167] = ServerAddressByClientCIDR{} - } else { - yyv170 := &yyv167[yyj167] - yyv170.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj167 < len(yyv167) { - yyv167 = yyv167[:yyj167] - yyc167 = true - } else if yyj167 == 0 && yyv167 == nil { - yyv167 = []ServerAddressByClientCIDR{} - yyc167 = true - } - } - yyh167.End() - if yyc167 { - *v = yyv167 - } -} - -func (x codecSelfer1234) encSliceClusterCondition(v []ClusterCondition, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv171 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy172 := &yyv171 - yy172.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceClusterCondition(v *[]ClusterCondition, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv173 := *v - yyh173, yyl173 := z.DecSliceHelperStart() - var yyc173 bool - if yyl173 == 0 { - if yyv173 == nil { - yyv173 = []ClusterCondition{} - yyc173 = true - } else if len(yyv173) != 0 { - yyv173 = yyv173[:0] - yyc173 = true - } - } else if yyl173 > 0 { - var yyrr173, yyrl173 int - var yyrt173 bool - if yyl173 > cap(yyv173) { - - yyrg173 := len(yyv173) > 0 - yyv2173 := yyv173 - yyrl173, yyrt173 = z.DecInferLen(yyl173, z.DecBasicHandle().MaxInitLen, 112) - if yyrt173 { - if yyrl173 <= cap(yyv173) { - yyv173 = yyv173[:yyrl173] - } else { - yyv173 = make([]ClusterCondition, yyrl173) - } - } else { - yyv173 = make([]ClusterCondition, yyrl173) - } - yyc173 = true - yyrr173 = len(yyv173) - if yyrg173 { - copy(yyv173, yyv2173) - } - } else if yyl173 != len(yyv173) { - yyv173 = yyv173[:yyl173] - yyc173 = true - } - yyj173 := 0 - for ; yyj173 < yyrr173; yyj173++ { - yyh173.ElemContainerState(yyj173) - if r.TryDecodeAsNil() { - yyv173[yyj173] = ClusterCondition{} - } else { - yyv174 := &yyv173[yyj173] - yyv174.CodecDecodeSelf(d) - } - - } - if yyrt173 { - for ; yyj173 < yyl173; yyj173++ { - yyv173 = append(yyv173, 
ClusterCondition{}) - yyh173.ElemContainerState(yyj173) - if r.TryDecodeAsNil() { - yyv173[yyj173] = ClusterCondition{} - } else { - yyv175 := &yyv173[yyj173] - yyv175.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj173 := 0 - for ; !r.CheckBreak(); yyj173++ { - - if yyj173 >= len(yyv173) { - yyv173 = append(yyv173, ClusterCondition{}) // var yyz173 ClusterCondition - yyc173 = true - } - yyh173.ElemContainerState(yyj173) - if yyj173 < len(yyv173) { - if r.TryDecodeAsNil() { - yyv173[yyj173] = ClusterCondition{} - } else { - yyv176 := &yyv173[yyj173] - yyv176.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj173 < len(yyv173) { - yyv173 = yyv173[:yyj173] - yyc173 = true - } else if yyj173 == 0 && yyv173 == nil { - yyv173 = []ClusterCondition{} - yyc173 = true - } - } - yyh173.End() - if yyc173 { - *v = yyv173 - } -} - -func (x codecSelfer1234) encSliceCluster(v []Cluster, e *codec1978.Encoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv177 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1234) - yy178 := &yyv177 - yy178.CodecEncodeSelf(e) - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1234) -} - -func (x codecSelfer1234) decSliceCluster(v *[]Cluster, d *codec1978.Decoder) { - var h codecSelfer1234 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv179 := *v - yyh179, yyl179 := z.DecSliceHelperStart() - var yyc179 bool - if yyl179 == 0 { - if yyv179 == nil { - yyv179 = []Cluster{} - yyc179 = true - } else if len(yyv179) != 0 { - yyv179 = yyv179[:0] - yyc179 = true - } - } else if yyl179 > 0 { - var yyrr179, yyrl179 int - var yyrt179 bool - if yyl179 > cap(yyv179) { - - yyrg179 := len(yyv179) > 0 - yyv2179 := yyv179 - yyrl179, yyrt179 = z.DecInferLen(yyl179, z.DecBasicHandle().MaxInitLen, 352) - if yyrt179 { - if yyrl179 <= cap(yyv179) { - yyv179 = yyv179[:yyrl179] - } else { - yyv179 = make([]Cluster, yyrl179) - } - } else { - yyv179 = make([]Cluster, yyrl179) - } - yyc179 = true - yyrr179 = len(yyv179) - if yyrg179 { - copy(yyv179, yyv2179) - } - } else if yyl179 != len(yyv179) { - yyv179 = yyv179[:yyl179] - yyc179 = true - } - yyj179 := 0 - for ; yyj179 < yyrr179; yyj179++ { - yyh179.ElemContainerState(yyj179) - if r.TryDecodeAsNil() { - yyv179[yyj179] = Cluster{} - } else { - yyv180 := &yyv179[yyj179] - yyv180.CodecDecodeSelf(d) - } - - } - if yyrt179 { - for ; yyj179 < yyl179; yyj179++ { - yyv179 = append(yyv179, Cluster{}) - yyh179.ElemContainerState(yyj179) - if r.TryDecodeAsNil() { - yyv179[yyj179] = Cluster{} - } else { - yyv181 := &yyv179[yyj179] - yyv181.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj179 := 0 - for ; !r.CheckBreak(); yyj179++ { - - if yyj179 >= len(yyv179) { - yyv179 = append(yyv179, Cluster{}) // var yyz179 Cluster - yyc179 = true - } - yyh179.ElemContainerState(yyj179) - if yyj179 < len(yyv179) { - if r.TryDecodeAsNil() { - yyv179[yyj179] = Cluster{} - } else { - yyv182 := &yyv179[yyj179] - yyv182.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj179 < len(yyv179) { - yyv179 = yyv179[:yyj179] - yyc179 = true - } else if yyj179 == 0 && yyv179 == nil { - yyv179 = []Cluster{} - yyc179 = true - } - } - yyh179.End() - if yyc179 { - *v = yyv179 - } -} diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/types.go b/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/types.go deleted file mode 100644 index 90b0db13f2d4..000000000000 --- 
a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/types.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/client-go/1.4/pkg/api/unversioned" - "k8s.io/client-go/1.4/pkg/api/v1" -) - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. -type ServerAddressByClientCIDR struct { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` -} - -// ClusterSpec describes the attributes of a kubernetes cluster. -type ClusterSpec struct { - // A map of client CIDR to server address. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" patchStrategy:"merge" patchMergeKey:"clientCIDR" protobuf:"bytes,1,rep,name=serverAddressByClientCIDRs"` - // Name of the secret containing kubeconfig to access this cluster. - // The secret is read from the kubernetes cluster that is hosting federation control plane. - // Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key "kubeconfig". - // This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. - // This can be left empty if the cluster allows insecure access. - SecretRef *v1.LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,2,opt,name=secretRef"` -} - -type ClusterConditionType string - -// These are valid conditions of a cluster. -const ( - // ClusterReady means the cluster is ready to accept workloads. - ClusterReady ClusterConditionType = "Ready" - // ClusterOffline means the cluster is temporarily down or not reachable - ClusterOffline ClusterConditionType = "Offline" -) - -// ClusterCondition describes current state of a cluster. -type ClusterCondition struct { - // Type of cluster condition, Complete or Failed. - Type ClusterConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ClusterConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` - // Last time the condition was checked. 
- LastProbeTime unversioned.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transit from one status to another. - LastTransitionTime unversioned.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -// ClusterStatus is information about the current status of a cluster updated by cluster controller peridocally. -type ClusterStatus struct { - // Conditions is an array of current cluster conditions. - Conditions []ClusterCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` - // Zones is the list of avaliability zones in which the nodes of the cluster exist, e.g. 'us-east1-a'. - // These will always be in the same region. - Zones []string `json:"zones,omitempty" protobuf:"bytes,5,rep,name=zones"` - // Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'. - Region string `json:"region,omitempty" protobuf:"bytes,6,opt,name=region"` -} - -// +genclient=true -// +nonNamespaced=true - -// Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation. -type Cluster struct { - unversioned.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the behavior of the Cluster. - Spec ClusterSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Status describes the current status of a Cluster - Status ClusterStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// A list of all the kubernetes clusters registered to the federation -type ClusterList struct { - unversioned.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds - unversioned.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of Cluster objects. - Items []Cluster `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 7c49777fb988..000000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
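The ClusterSpec documentation in the deleted types.go above says clients should pick the server address whose ClientCIDR is the longest match for their own IP. A minimal, hypothetical Go sketch of that selection rule (pickServerAddress and the sample addresses are illustrative only, not part of the vendored code):

package main

import (
	"fmt"
	"net"
)

// ServerAddressByClientCIDR mirrors the struct in the deleted file: a CIDR
// that clients match against, and the server address to use for that CIDR.
type ServerAddressByClientCIDR struct {
	ClientCIDR    string
	ServerAddress string
}

// pickServerAddress returns the ServerAddress of the longest-prefix CIDR
// containing clientIP, implementing the "longest matching CIDR" rule.
func pickServerAddress(clientIP net.IP, addrs []ServerAddressByClientCIDR) (string, bool) {
	best, bestBits, found := "", -1, false
	for _, a := range addrs {
		_, cidr, err := net.ParseCIDR(a.ClientCIDR)
		if err != nil || !cidr.Contains(clientIP) {
			continue
		}
		if ones, _ := cidr.Mask.Size(); ones > bestBits {
			best, bestBits, found = a.ServerAddress, ones, true
		}
	}
	return best, found
}

func main() {
	addrs := []ServerAddressByClientCIDR{
		{ClientCIDR: "0.0.0.0/0", ServerAddress: "federation.example.com:443"},
		{ClientCIDR: "10.0.0.0/8", ServerAddress: "10.1.2.3:443"},
	}
	// A client in 10.0.0.0/8 prefers the more specific /8 entry over /0.
	fmt.Println(pickServerAddress(net.ParseIP("10.9.8.7"), addrs))
}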
-*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_Cluster = map[string]string{ - "": "Information about a registered cluster in a federated kubernetes setup. Clusters are not namespaced and have unique names in the federation.", - "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Cluster.", - "status": "Status describes the current status of a Cluster", -} - -func (Cluster) SwaggerDoc() map[string]string { - return map_Cluster -} - -var map_ClusterCondition = map[string]string{ - "": "ClusterCondition describes current state of a cluster.", - "type": "Type of cluster condition, Complete or Failed.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastProbeTime": "Last time the condition was checked.", - "lastTransitionTime": "Last time the condition transit from one status to another.", - "reason": "(brief) reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (ClusterCondition) SwaggerDoc() map[string]string { - return map_ClusterCondition -} - -var map_ClusterList = map[string]string{ - "": "A list of all the kubernetes clusters registered to the federation", - "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds", - "items": "List of Cluster objects.", -} - -func (ClusterList) SwaggerDoc() map[string]string { - return map_ClusterList -} - -var map_ClusterSpec = map[string]string{ - "": "ClusterSpec describes the attributes of a kubernetes cluster.", - "serverAddressByClientCIDRs": "A map of client CIDR to server address. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR.", - "secretRef": "Name of the secret containing kubeconfig to access this cluster. The secret is read from the kubernetes cluster that is hosting federation control plane. Admin needs to ensure that the required secret exists. Secret should be in the same namespace where federation control plane is hosted and it should have kubeconfig in its data with key \"kubeconfig\". This will later be changed to a reference to secret in federation control plane when the federation control plane supports secrets. This can be left empty if the cluster allows insecure access.", -} - -func (ClusterSpec) SwaggerDoc() map[string]string { - return map_ClusterSpec -} - -var map_ClusterStatus = map[string]string{ - "": "ClusterStatus is information about the current status of a cluster updated by cluster controller peridocally.", - "conditions": "Conditions is an array of current cluster conditions.", - "zones": "Zones is the list of avaliability zones in which the nodes of the cluster exist, e.g. 
'us-east1-a'. These will always be in the same region.", - "region": "Region is the name of the region in which all of the nodes in the cluster exist. e.g. 'us-east1'.", -} - -func (ClusterStatus) SwaggerDoc() map[string]string { - return map_ClusterStatus -} - -var map_ServerAddressByClientCIDR = map[string]string{ - "": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", - "clientCIDR": "The CIDR with which clients can match their IP to figure out the server address that they should use.", - "serverAddress": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.", -} - -func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { - return map_ServerAddressByClientCIDR -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/zz_generated.conversion.go deleted file mode 100644 index 57e78aa1df87..000000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,300 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! - -package v1beta1 - -import ( - api "k8s.io/client-go/1.4/pkg/api" - v1 "k8s.io/client-go/1.4/pkg/api/v1" - conversion "k8s.io/client-go/1.4/pkg/conversion" - federation "k8s.io/client-go/1.4/pkg/federation/apis/federation" - runtime "k8s.io/client-go/1.4/pkg/runtime" -) - -func init() { - SchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1beta1_Cluster_To_federation_Cluster, - Convert_federation_Cluster_To_v1beta1_Cluster, - Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition, - Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition, - Convert_v1beta1_ClusterList_To_federation_ClusterList, - Convert_federation_ClusterList_To_v1beta1_ClusterList, - Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec, - Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec, - Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus, - Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus, - Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR, - Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR, - ) -} - -func autoConvert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error { - return autoConvert_v1beta1_Cluster_To_federation_Cluster(in, out, s) -} - -func autoConvert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil { - return err - } - if err := Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func Convert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error { - return autoConvert_federation_Cluster_To_v1beta1_Cluster(in, out, s) -} - -func autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error { - out.Type = federation.ClusterConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in, out, s) -} - -func autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { - out.Type = ClusterConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil { - return err - } - if err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -func Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error { - return autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in, out, s) -} - -func autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]federation.Cluster, len(*in)) - for i := range *in { - if err := Convert_v1beta1_Cluster_To_federation_Cluster(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in, out, s) -} - -func autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error { - if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { - return err - } - if err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil { - 
return err - } - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Cluster, len(*in)) - for i := range *in { - if err := Convert_federation_Cluster_To_v1beta1_Cluster(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -func Convert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error { - return autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in, out, s) -} - -func autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error { - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]federation.ServerAddressByClientCIDR, len(*in)) - for i := range *in { - if err := Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.ServerAddressByClientCIDRs = nil - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(api.LocalObjectReference) - // TODO: Inefficient conversion - can we improve it? - if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.SecretRef = nil - } - return nil -} - -func Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in, out, s) -} - -func autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error { - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - for i := range *in { - if err := Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.ServerAddressByClientCIDRs = nil - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.LocalObjectReference) - // TODO: Inefficient conversion - can we improve it? 
- if err := s.Convert(*in, *out, 0); err != nil { - return err - } - } else { - out.SecretRef = nil - } - return nil -} - -func Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error { - return autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in, out, s) -} - -func autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error { - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]federation.ClusterCondition, len(*in)) - for i := range *in { - if err := Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.Zones = in.Zones - out.Region = in.Region - return nil -} - -func Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error { - return autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in, out, s) -} - -func autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ClusterCondition, len(*in)) - for i := range *in { - if err := Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - out.Zones = in.Zones - out.Region = in.Region - return nil -} - -func Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { - return autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in, out, s) -} - -func autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error { - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil -} - -func Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error { - return autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in, out, s) -} - -func autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error { - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil -} - -func Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error { - return autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in, out, s) -} diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 445ddc6780cf..000000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,159 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package v1beta1 - -import ( - v1 "k8s.io/client-go/1.4/pkg/api/v1" - conversion "k8s.io/client-go/1.4/pkg/conversion" - runtime "k8s.io/client-go/1.4/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_Cluster, InType: reflect.TypeOf(&Cluster{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterCondition, InType: reflect.TypeOf(&ClusterCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterList, InType: reflect.TypeOf(&ClusterList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterSpec, InType: reflect.TypeOf(&ClusterSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ClusterStatus, InType: reflect.TypeOf(&ClusterStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1beta1_ServerAddressByClientCIDR, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, - ) -} - -func DeepCopy_v1beta1_Cluster(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Cluster) - out := out.(*Cluster) - out.TypeMeta = in.TypeMeta - if err := v1.DeepCopy_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_ClusterSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_v1beta1_ClusterStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_v1beta1_ClusterCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterCondition) - out := out.(*ClusterCondition) - out.Type = in.Type - out.Status = in.Status - out.LastProbeTime = in.LastProbeTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message - return nil - } -} - -func DeepCopy_v1beta1_ClusterList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterList) - out := out.(*ClusterList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Cluster, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_Cluster(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil - } -} - -func DeepCopy_v1beta1_ClusterSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterSpec) - out := out.(*ClusterSpec) - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - 
} else { - out.ServerAddressByClientCIDRs = nil - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.LocalObjectReference) - **out = **in - } else { - out.SecretRef = nil - } - return nil - } -} - -func DeepCopy_v1beta1_ClusterStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterStatus) - out := out.(*ClusterStatus) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ClusterCondition, len(*in)) - for i := range *in { - if err := DeepCopy_v1beta1_ClusterCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.Zones != nil { - in, out := &in.Zones, &out.Zones - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.Zones = nil - } - out.Region = in.Region - return nil - } -} - -func DeepCopy_v1beta1_ServerAddressByClientCIDR(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServerAddressByClientCIDR) - out := out.(*ServerAddressByClientCIDR) - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil - } -} diff --git a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/zz_generated.deepcopy.go deleted file mode 100644 index 612436ae368d..000000000000 --- a/vendor/k8s.io/client-go/1.4/pkg/federation/apis/federation/zz_generated.deepcopy.go +++ /dev/null @@ -1,200 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package federation - -import ( - api "k8s.io/client-go/1.4/pkg/api" - conversion "k8s.io/client-go/1.4/pkg/conversion" - runtime "k8s.io/client-go/1.4/pkg/runtime" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_Cluster, InType: reflect.TypeOf(&Cluster{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterCondition, InType: reflect.TypeOf(&ClusterCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterList, InType: reflect.TypeOf(&ClusterList{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterReplicaSetPreferences, InType: reflect.TypeOf(&ClusterReplicaSetPreferences{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterSpec, InType: reflect.TypeOf(&ClusterSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ClusterStatus, InType: reflect.TypeOf(&ClusterStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_FederatedReplicaSetPreferences, InType: reflect.TypeOf(&FederatedReplicaSetPreferences{})}, - conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_federation_ServerAddressByClientCIDR, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, - ) -} - -func DeepCopy_federation_Cluster(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*Cluster) - out := out.(*Cluster) - out.TypeMeta = in.TypeMeta - if err := api.DeepCopy_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, c); err != nil { - return err - } - if err := DeepCopy_federation_ClusterSpec(&in.Spec, &out.Spec, c); err != nil { - return err - } - if err := DeepCopy_federation_ClusterStatus(&in.Status, &out.Status, c); err != nil { - return err - } - return nil - } -} - -func DeepCopy_federation_ClusterCondition(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterCondition) - out := out.(*ClusterCondition) - out.Type = in.Type - out.Status = in.Status - out.LastProbeTime = in.LastProbeTime.DeepCopy() - out.LastTransitionTime = in.LastTransitionTime.DeepCopy() - out.Reason = in.Reason - out.Message = in.Message - return nil - } -} - -func DeepCopy_federation_ClusterList(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterList) - out := out.(*ClusterList) - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Cluster, len(*in)) - for i := range *in { - if err := DeepCopy_federation_Cluster(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil - } -} - -func DeepCopy_federation_ClusterReplicaSetPreferences(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterReplicaSetPreferences) - out := out.(*ClusterReplicaSetPreferences) - out.MinReplicas = in.MinReplicas - if in.MaxReplicas != nil { - in, out := &in.MaxReplicas, &out.MaxReplicas - *out = new(int64) - **out = **in - } else { - out.MaxReplicas = nil - } - out.Weight = in.Weight - return nil - } -} - -func DeepCopy_federation_ClusterSpec(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterSpec) - out := out.(*ClusterSpec) - if in.ServerAddressByClientCIDRs != nil { - in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs - *out = make([]ServerAddressByClientCIDR, len(*in)) - for i := range *in { - (*out)[i] = (*in)[i] - } - } else { - out.ServerAddressByClientCIDRs = nil - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(api.LocalObjectReference) - **out = **in - } else { - out.SecretRef = 
nil - } - return nil - } -} - -func DeepCopy_federation_ClusterStatus(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ClusterStatus) - out := out.(*ClusterStatus) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ClusterCondition, len(*in)) - for i := range *in { - if err := DeepCopy_federation_ClusterCondition(&(*in)[i], &(*out)[i], c); err != nil { - return err - } - } - } else { - out.Conditions = nil - } - if in.Zones != nil { - in, out := &in.Zones, &out.Zones - *out = make([]string, len(*in)) - copy(*out, *in) - } else { - out.Zones = nil - } - out.Region = in.Region - return nil - } -} - -func DeepCopy_federation_FederatedReplicaSetPreferences(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*FederatedReplicaSetPreferences) - out := out.(*FederatedReplicaSetPreferences) - out.Rebalance = in.Rebalance - if in.Clusters != nil { - in, out := &in.Clusters, &out.Clusters - *out = make(map[string]ClusterReplicaSetPreferences) - for key, val := range *in { - newVal := new(ClusterReplicaSetPreferences) - if err := DeepCopy_federation_ClusterReplicaSetPreferences(&val, newVal, c); err != nil { - return err - } - (*out)[key] = *newVal - } - } else { - out.Clusters = nil - } - return nil - } -} - -func DeepCopy_federation_ServerAddressByClientCIDR(in interface{}, out interface{}, c *conversion.Cloner) error { - { - in := in.(*ServerAddressByClientCIDR) - out := out.(*ServerAddressByClientCIDR) - out.ClientCIDR = in.ClientCIDR - out.ServerAddress = in.ServerAddress - return nil - } -} diff --git a/vendor/k8s.io/client-go/1.4/pkg/kubelet/qos/policy.go b/vendor/k8s.io/client-go/1.4/pkg/kubelet/qos/policy.go index f6e907d1afa9..c2c1d1d8f7fd 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/kubelet/qos/policy.go +++ b/vendor/k8s.io/client-go/1.4/pkg/kubelet/qos/policy.go @@ -21,8 +21,9 @@ import ( ) const ( - PodInfraOOMAdj int = -999 + PodInfraOOMAdj int = -998 KubeletOOMScoreAdj int = -999 + DockerOOMScoreAdj int = -999 KubeProxyOOMScoreAdj int = -999 guaranteedOOMScoreAdj int = -998 besteffortOOMScoreAdj int = 1000 @@ -53,10 +54,10 @@ func GetContainerOOMScoreAdjust(pod *api.Pod, container *api.Container, memoryCa // Note that this is a heuristic, it won't work if a container has many small processes. memoryRequest := container.Resources.Requests.Memory().Value() oomScoreAdjust := 1000 - (1000*memoryRequest)/memoryCapacity - // A guaranteed pod using 100% of memory can have an OOM score of 1. Ensure + // A guaranteed pod using 100% of memory can have an OOM score of 10. Ensure // that burstable pods have a higher OOM score adjustment. - if oomScoreAdjust < 2 { - return 2 + if int(oomScoreAdjust) < (1000 + guaranteedOOMScoreAdj) { + return (1000 + guaranteedOOMScoreAdj) } // Give burstable pods a higher chance of survival over besteffort pods. if int(oomScoreAdjust) == besteffortOOMScoreAdj { diff --git a/vendor/k8s.io/client-go/1.4/pkg/util/config/feature_gate.go b/vendor/k8s.io/client-go/1.4/pkg/util/config/feature_gate.go index fafa53bda8c7..c8782c4c4216 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/util/config/feature_gate.go +++ b/vendor/k8s.io/client-go/1.4/pkg/util/config/feature_gate.go @@ -216,6 +216,7 @@ func (f *featureGate) AddFlag(fs *pflag.FlagSet) { } known = append(known, fmt.Sprintf("%s=true|false (%sdefault=%t)", k, pre, v.enabled)) } + sort.Strings(known) fs.Var(f, flagName, ""+ "A set of key=value pairs that describe feature gates for alpha/experimental features. 
"+ "Options are:\n"+strings.Join(known, "\n")) diff --git a/vendor/k8s.io/client-go/1.4/pkg/util/net/http.go b/vendor/k8s.io/client-go/1.4/pkg/util/net/http.go index 53f28dfca253..15df077412eb 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/util/net/http.go +++ b/vendor/k8s.io/client-go/1.4/pkg/util/net/http.go @@ -108,6 +108,34 @@ func Dialer(transport http.RoundTripper) (DialFunc, error) { } } +// CloneTLSConfig returns a tls.Config with all exported fields except SessionTicketsDisabled and SessionTicketKey copied. +// This makes it safe to call CloneTLSConfig on a config in active use by a server. +// TODO: replace with tls.Config#Clone when we move to go1.8 +func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} + func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) { if transport == nil { return nil, nil diff --git a/vendor/k8s.io/client-go/1.4/pkg/version/base.go b/vendor/k8s.io/client-go/1.4/pkg/version/base.go index c377705fe8f1..2609ba2f5301 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/version/base.go +++ b/vendor/k8s.io/client-go/1.4/pkg/version/base.go @@ -39,8 +39,8 @@ var ( // them irrelevant. (Next we'll take it out, which may muck with // scripts consuming the kubectl version output - but most of // these should be looking at gitVersion already anyways.) - gitMajor string = "" // major version, always numeric - gitMinor string = "" // minor version, numeric possibly followed by "+" + gitMajor string = "1" // major version, always numeric + gitMinor string = "4+" // minor version, numeric possibly followed by "+" // semantic version, derived by build scripts (see // https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md @@ -51,7 +51,7 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. - gitVersion string = "v0.0.0-master+$Format:%h$" + gitVersion string = "v1.4.1-beta.0+$Format:%h$" gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" diff --git a/vendor/k8s.io/client-go/1.4/pkg/watch/watch.go b/vendor/k8s.io/client-go/1.4/pkg/watch/watch.go index 0879ce8f5d1d..01cdd2a88c45 100644 --- a/vendor/k8s.io/client-go/1.4/pkg/watch/watch.go +++ b/vendor/k8s.io/client-go/1.4/pkg/watch/watch.go @@ -20,6 +20,8 @@ import ( "sync" "k8s.io/client-go/1.4/pkg/runtime" + + "github.com/golang/glog" ) // Interface can be implemented by anything that knows how to watch and report changes. @@ -89,16 +91,29 @@ func NewFake() *FakeWatcher { } } +func NewFakeWithChanSize(size int) *FakeWatcher { + return &FakeWatcher{ + result: make(chan Event, size), + } +} + // Stop implements Interface.Stop(). 
func (f *FakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { + glog.V(4).Infof("Stopping fake watcher.") close(f.result) f.Stopped = true } } +func (f *FakeWatcher) IsStopped() bool { + f.Lock() + defer f.Unlock() + return f.Stopped +} + // Reset prepares the watcher to be reused. func (f *FakeWatcher) Reset() { f.Lock() diff --git a/vendor/k8s.io/client-go/1.4/rest/config.go b/vendor/k8s.io/client-go/1.4/rest/config.go index a7136f254616..766459c820d8 100644 --- a/vendor/k8s.io/client-go/1.4/rest/config.go +++ b/vendor/k8s.io/client-go/1.4/rest/config.go @@ -28,8 +28,6 @@ import ( "github.com/golang/glog" - "time" - "k8s.io/client-go/1.4/pkg/api" "k8s.io/client-go/1.4/pkg/api/unversioned" "k8s.io/client-go/1.4/pkg/runtime" @@ -111,9 +109,6 @@ type Config struct { // Rate limiter for limiting connections to the master from this client. If present overwrites QPS/Burst RateLimiter flowcontrol.RateLimiter - // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. - Timeout time.Duration - // Version forces a specific version to be used (if registered) // Do we need this? // Version string diff --git a/vendor/k8s.io/client-go/1.4/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/1.4/tools/cache/delta_fifo.go index 9c6abd2c3593..9ed40fd406ba 100644 --- a/vendor/k8s.io/client-go/1.4/tools/cache/delta_fifo.go +++ b/vendor/k8s.io/client-go/1.4/tools/cache/delta_fifo.go @@ -522,6 +522,18 @@ func (f *DeltaFIFO) syncKey(key string) error { return nil } + // If we are doing Resync() and there is already an event queued for that object, + // we ignore the Resync for it. This is to avoid the race, in which the resync + // comes with the previous value of object (since queueing an event for the object + // doesn't trigger changing the underlying store . 
+ id, err := f.KeyOf(obj) + if err != nil { + return KeyError{obj, err} + } + if len(f.items[id]) > 0 { + return nil + } + if err := f.queueActionLocked(Sync, obj); err != nil { return fmt.Errorf("couldn't queue object: %v", err) } diff --git a/vendor/k8s.io/client-go/1.4/tools/cache/delta_fifo_test.go b/vendor/k8s.io/client-go/1.4/tools/cache/delta_fifo_test.go index 3d087a4b0e25..f1a95573709e 100644 --- a/vendor/k8s.io/client-go/1.4/tools/cache/delta_fifo_test.go +++ b/vendor/k8s.io/client-go/1.4/tools/cache/delta_fifo_test.go @@ -336,6 +336,29 @@ func TestDeltaFIFO_ReplaceMakesDeletions(t *testing.T) { } } +func TestDeltaFIFO_UpdateResyncRace(t *testing.T) { + f := NewDeltaFIFO( + testFifoObjectKeyFunc, + nil, + keyLookupFunc(func() []testFifoObject { + return []testFifoObject{mkFifoObj("foo", 5)} + }), + ) + f.Update(mkFifoObj("foo", 6)) + f.Resync() + + expectedList := []Deltas{ + {{Updated, mkFifoObj("foo", 6)}}, + } + + for _, expected := range expectedList { + cur := Pop(f).(Deltas) + if e, a := expected, cur; !reflect.DeepEqual(e, a) { + t.Errorf("Expected %#v, got %#v", e, a) + } + } +} + func TestDeltaFIFO_detectLineJumpers(t *testing.T) { f := NewDeltaFIFO(testFifoObjectKeyFunc, nil, nil) diff --git a/vendor/k8s.io/client-go/1.4/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/1.4/tools/clientcmd/client_config.go index b549e79fc4b2..32fd9eeb4829 100644 --- a/vendor/k8s.io/client-go/1.4/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/1.4/tools/clientcmd/client_config.go @@ -23,13 +23,10 @@ import ( "net/url" "os" "strings" - "time" "github.com/golang/glog" "github.com/imdario/mergo" - "strconv" - "k8s.io/client-go/1.4/pkg/api" "k8s.io/client-go/1.4/rest" clientauth "k8s.io/client-go/1.4/tools/auth" @@ -37,16 +34,25 @@ import ( ) var ( - // DefaultCluster is the cluster config used when no other config is specified - // TODO: eventually apiserver should start on 443 and be secure by default - DefaultCluster = clientcmdapi.Cluster{Server: "http://localhost:8080"} - - // EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name - EnvVarCluster = clientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")} - - DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{}, nil, NewDefaultClientConfigLoadingRules()} + // ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields + // DEPRECATED will be replaced + ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()} + // DefaultClientConfig represents the legacy behavior of this package for defaulting + // DEPRECATED will be replace + DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{ + ClusterDefaults: ClusterDefaults, + }, nil, NewDefaultClientConfigLoadingRules()} ) +// getDefaultServer returns a default setting for DefaultClientConfig +// DEPRECATED +func getDefaultServer() string { + if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 { + return server + } + return "http://localhost:8080" +} + // ClientConfig is used to make it easy to get an api server client type ClientConfig interface { // RawConfig returns the merged result of all overrides @@ -102,17 +108,6 @@ func (config *DirectClientConfig) ClientConfig() (*rest.Config, error) { clientConfig := &rest.Config{} clientConfig.Host = configClusterInfo.Server - - if len(config.overrides.Timeout) > 0 { - if i, err := strconv.ParseInt(config.overrides.Timeout, 10, 64); err == nil && i >= 0 { - clientConfig.Timeout = 
time.Duration(i) * time.Second - } else if requestTimeout, err := time.ParseDuration(config.overrides.Timeout); err == nil { - clientConfig.Timeout = requestTimeout - } else { - return nil, fmt.Errorf("Invalid value for option '--request-timeout'. Value must be a single integer, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)") - } - } - if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 { u.RawQuery = "" u.Fragment = "" @@ -344,7 +339,6 @@ func (config *DirectClientConfig) getCluster() clientcmdapi.Cluster { var mergedClusterInfo clientcmdapi.Cluster mergo.Merge(&mergedClusterInfo, config.overrides.ClusterDefaults) - mergo.Merge(&mergedClusterInfo, EnvVarCluster) if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { mergo.Merge(&mergedClusterInfo, configClusterInfo) } @@ -364,6 +358,8 @@ func (config *DirectClientConfig) getCluster() clientcmdapi.Cluster { // inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment. type inClusterClientConfig struct{} +var _ ClientConfig = inClusterClientConfig{} + func (inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) { return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters") } @@ -372,21 +368,21 @@ func (inClusterClientConfig) ClientConfig() (*rest.Config, error) { return rest.InClusterConfig() } -func (inClusterClientConfig) Namespace() (string, error) { +func (inClusterClientConfig) Namespace() (string, bool, error) { // This way assumes you've set the POD_NAMESPACE environment variable using the downward API. // This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up if ns := os.Getenv("POD_NAMESPACE"); ns != "" { - return ns, nil + return ns, true, nil } // Fall back to the namespace associated with the service account token, if available if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { if ns := strings.TrimSpace(string(data)); len(ns) > 0 { - return ns, nil + return ns, true, nil } } - return "default", nil + return "default", false, nil } func (inClusterClientConfig) ConfigAccess() ConfigAccess { diff --git a/vendor/k8s.io/client-go/1.4/tools/clientcmd/client_config_test.go b/vendor/k8s.io/client-go/1.4/tools/clientcmd/client_config_test.go index 49f7e76c73a7..af66403f4429 100644 --- a/vendor/k8s.io/client-go/1.4/tools/clientcmd/client_config_test.go +++ b/vendor/k8s.io/client-go/1.4/tools/clientcmd/client_config_test.go @@ -293,8 +293,6 @@ func TestCreateCleanWithPrefix(t *testing.T) { {"anything", "anything"}, } - // WARNING: EnvVarCluster.Server is set during package loading time and can not be overridden by os.Setenv inside this test - EnvVarCluster.Server = "" tt = append(tt, struct{ server, host string }{"", "http://localhost:8080"}) for _, tc := range tt { @@ -305,7 +303,7 @@ func TestCreateCleanWithPrefix(t *testing.T) { config.Clusters["clean"] = cleanConfig clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{ - ClusterDefaults: DefaultCluster, + ClusterDefaults: clientcmdapi.Cluster{Server: "http://localhost:8080"}, }, nil) clientConfig, err := clientBuilder.ClientConfig() @@ -334,7 +332,7 @@ func TestCreateCleanDefault(t *testing.T) { func TestCreateCleanDefaultCluster(t *testing.T) { config := createValidTestConfig() clientBuilder := NewDefaultClientConfig(*config, &ConfigOverrides{ - 
ClusterDefaults: DefaultCluster, + ClusterDefaults: clientcmdapi.Cluster{Server: "http://localhost:8080"}, }) clientConfig, err := clientBuilder.ClientConfig() @@ -361,7 +359,7 @@ func TestCreateMissingContext(t *testing.T) { const expectedErrorContains = "Context was not found for specified context" config := createValidTestConfig() clientBuilder := NewNonInteractiveClientConfig(*config, "not-present", &ConfigOverrides{ - ClusterDefaults: DefaultCluster, + ClusterDefaults: clientcmdapi.Cluster{Server: "http://localhost:8080"}, }, nil) clientConfig, err := clientBuilder.ClientConfig() diff --git a/vendor/k8s.io/client-go/1.4/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/1.4/tools/clientcmd/loader.go index b5d460f531f9..008d8e2f29ce 100644 --- a/vendor/k8s.io/client-go/1.4/tools/clientcmd/loader.go +++ b/vendor/k8s.io/client-go/1.4/tools/clientcmd/loader.go @@ -23,6 +23,7 @@ import ( "os" "path" "path/filepath" + "reflect" goruntime "runtime" "strings" @@ -33,6 +34,7 @@ import ( "k8s.io/client-go/1.4/pkg/runtime" utilerrors "k8s.io/client-go/1.4/pkg/util/errors" "k8s.io/client-go/1.4/pkg/util/homedir" + "k8s.io/client-go/1.4/rest" clientcmdapi "k8s.io/client-go/1.4/tools/clientcmd/api" clientcmdlatest "k8s.io/client-go/1.4/tools/clientcmd/api/latest" ) @@ -65,6 +67,9 @@ func currentMigrationRules() map[string]string { type ClientConfigLoader interface { ConfigAccess + // IsDefaultConfig returns true if the returned config matches the defaults. + IsDefaultConfig(*rest.Config) bool + // Load returns the latest config Load() (*clientcmdapi.Config, error) } @@ -96,6 +101,9 @@ func (g *ClientConfigGetter) IsExplicitFile() bool { func (g *ClientConfigGetter) GetExplicitFile() string { return "" } +func (g *ClientConfigGetter) IsDefaultConfig(config *rest.Config) bool { + return false +} // ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config // Callers can put the chain together however they want, but we'd recommend: @@ -112,6 +120,10 @@ type ClientConfigLoadingRules struct { // DoNotResolvePaths indicates whether or not to resolve paths with respect to the originating files. This is phrased as a negative so // that a default object that doesn't set this will usually get the behavior it wants. DoNotResolvePaths bool + + // DefaultClientConfig is an optional field indicating what rules to use to calculate a default configuration. + // This should match the overrides passed in to ClientConfig loader. + DefaultClientConfig ClientConfig } // ClientConfigLoadingRules implements the ClientConfigLoader interface. 
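The clientcmd changes above and below replace the old DefaultCluster/EnvVarCluster pair with ClusterDefaults and add an IsDefaultConfig hook to the loader. One plausible way a caller could wire these together so that a purely-default kubeconfig falls through to in-cluster configuration; the identifiers come from the hunks in this diff, but the wiring itself is illustrative, not taken from this patch:

    package main

    import (
        "fmt"

        "k8s.io/client-go/1.4/tools/clientcmd"
    )

    func main() {
        // Point the loading rules at the package-level default config so the new
        // IsDefaultConfig check can recognize a "nothing but defaults" result and
        // let the deferred loader fall back to in-cluster configuration.
        loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
        loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig

        overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults}
        kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)

        restConfig, err := kubeConfig.ClientConfig()
        if err != nil {
            fmt.Println("no usable client configuration:", err)
            return
        }
        fmt.Println("API server:", restConfig.Host)
    }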
@@ -192,6 +204,7 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { // first merge all of our maps mapConfig := clientcmdapi.NewConfig() + for _, kubeconfig := range kubeconfigs { mergo.Merge(mapConfig, kubeconfig) } @@ -316,6 +329,18 @@ func (rules *ClientConfigLoadingRules) GetExplicitFile() string { return rules.ExplicitPath } +// IsDefaultConfig returns true if the provided configuration matches the default +func (rules *ClientConfigLoadingRules) IsDefaultConfig(config *rest.Config) bool { + if rules.DefaultClientConfig == nil { + return false + } + defaultConfig, err := rules.DefaultClientConfig.ClientConfig() + if err != nil { + return false + } + return reflect.DeepEqual(config, defaultConfig) +} + // LoadFromFile takes a filename and deserializes the contents into Config object func LoadFromFile(filename string) (*clientcmdapi.Config, error) { kubeconfigBytes, err := ioutil.ReadFile(filename) diff --git a/vendor/k8s.io/client-go/1.4/tools/clientcmd/merged_client_builder.go b/vendor/k8s.io/client-go/1.4/tools/clientcmd/merged_client_builder.go index d35cbc2b5369..9cd8c06fd22e 100644 --- a/vendor/k8s.io/client-go/1.4/tools/clientcmd/merged_client_builder.go +++ b/vendor/k8s.io/client-go/1.4/tools/clientcmd/merged_client_builder.go @@ -18,11 +18,8 @@ package clientcmd import ( "io" - "reflect" "sync" - "github.com/golang/glog" - "k8s.io/client-go/1.4/rest" clientcmdapi "k8s.io/client-go/1.4/tools/clientcmd/api" ) @@ -39,16 +36,25 @@ type DeferredLoadingClientConfig struct { clientConfig ClientConfig loadingLock sync.Mutex + + // provided for testing + icc InClusterConfig +} + +// InClusterConfig abstracts details of whether the client is running in a cluster for testing. +type InClusterConfig interface { + ClientConfig + Possible() bool } // NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig { - return &DeferredLoadingClientConfig{loader: loader, overrides: overrides} + return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: inClusterClientConfig{}} } // NewInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name and the fallback auth reader func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig { - return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, fallbackReader: fallbackReader} + return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: inClusterClientConfig{}, fallbackReader: fallbackReader} } func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) { @@ -92,18 +98,30 @@ func (config *DeferredLoadingClientConfig) ClientConfig() (*rest.Config, error) return nil, err } + // load the configuration and return on non-empty errors and if the + // content differs from the default config mergedConfig, err := mergedClientConfig.ClientConfig() - if err != nil { - return nil, err + switch { + case err != nil: + if !IsEmptyConfig(err) { + // return on any error except empty config + return nil, err + } + case mergedConfig != nil: + // the configuration is valid, but if this is equal to the defaults we should try + // in-cluster configuration + if !config.loader.IsDefaultConfig(mergedConfig) { + return mergedConfig, nil + } } - // Are we running in a cluster and 
were no other configs found? If so, use the in-cluster-config. - icc := inClusterClientConfig{} - defaultConfig, err := DefaultClientConfig.ClientConfig() - if icc.Possible() && err == nil && reflect.DeepEqual(mergedConfig, defaultConfig) { - glog.V(2).Info("No kubeconfig could be created, falling back to service account.") - return icc.ClientConfig() + + // check for in-cluster configuration and use it + if config.icc.Possible() { + return config.icc.ClientConfig() } - return mergedConfig, nil + + // return the result of the merged client config + return mergedConfig, err } // Namespace implements KubeConfig diff --git a/vendor/k8s.io/client-go/1.4/tools/clientcmd/merged_client_builder_test.go b/vendor/k8s.io/client-go/1.4/tools/clientcmd/merged_client_builder_test.go new file mode 100644 index 000000000000..5e4db1bc4a92 --- /dev/null +++ b/vendor/k8s.io/client-go/1.4/tools/clientcmd/merged_client_builder_test.go @@ -0,0 +1,217 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "fmt" + "testing" + + "k8s.io/client-go/1.4/rest" + clientcmdapi "k8s.io/client-go/1.4/tools/clientcmd/api" +) + +type testLoader struct { + ClientConfigLoader + + called bool + config *clientcmdapi.Config + err error +} + +func (l *testLoader) Load() (*clientcmdapi.Config, error) { + l.called = true + return l.config, l.err +} + +type testClientConfig struct { + config *rest.Config + err error +} + +func (c *testClientConfig) RawConfig() (clientcmdapi.Config, error) { + return clientcmdapi.Config{}, fmt.Errorf("unexpected call") +} +func (c *testClientConfig) ClientConfig() (*rest.Config, error) { + return c.config, c.err +} +func (c *testClientConfig) Namespace() (string, bool, error) { + return "", false, fmt.Errorf("unexpected call") +} +func (c *testClientConfig) ConfigAccess() ConfigAccess { + return nil +} + +type testICC struct { + testClientConfig + + possible bool + called bool +} + +func (icc *testICC) Possible() bool { + icc.called = true + return icc.possible +} + +func TestInClusterConfig(t *testing.T) { + default1 := &DirectClientConfig{ + config: *createValidTestConfig(), + contextName: "clean", + overrides: &ConfigOverrides{}, + } + invalidDefaultConfig := clientcmdapi.NewConfig() + invalidDefaultConfig.Clusters["clean"] = &clientcmdapi.Cluster{ + Server: "http://localhost:8080", + } + invalidDefaultConfig.Contexts["other"] = &clientcmdapi.Context{ + Cluster: "clean", + } + invalidDefaultConfig.CurrentContext = "clean" + + defaultInvalid := &DirectClientConfig{ + config: *invalidDefaultConfig, + overrides: &ConfigOverrides{}, + } + if _, err := defaultInvalid.ClientConfig(); err == nil || !IsConfigurationInvalid(err) { + t.Fatal(err) + } + config1, err := default1.ClientConfig() + if err != nil { + t.Fatal(err) + } + config2 := &rest.Config{Host: "config2"} + err1 := fmt.Errorf("unique error") + + testCases := map[string]struct { + clientConfig *testClientConfig + icc *testICC + defaultConfig *DirectClientConfig + + checkedICC bool + result 
*rest.Config + err error + }{ + "in-cluster checked on other error": { + clientConfig: &testClientConfig{err: ErrEmptyConfig}, + icc: &testICC{}, + + checkedICC: true, + result: nil, + err: ErrEmptyConfig, + }, + + "in-cluster not checked on non-empty error": { + clientConfig: &testClientConfig{err: ErrEmptyCluster}, + icc: &testICC{}, + + checkedICC: false, + result: nil, + err: ErrEmptyCluster, + }, + + "in-cluster checked when config is default": { + defaultConfig: default1, + clientConfig: &testClientConfig{config: config1}, + icc: &testICC{}, + + checkedICC: true, + result: config1, + err: nil, + }, + + "in-cluster not checked when default config is invalid": { + defaultConfig: defaultInvalid, + clientConfig: &testClientConfig{config: config1}, + icc: &testICC{}, + + checkedICC: false, + result: config1, + err: nil, + }, + + "in-cluster not checked when config is not equal to default": { + defaultConfig: default1, + clientConfig: &testClientConfig{config: config2}, + icc: &testICC{}, + + checkedICC: false, + result: config2, + err: nil, + }, + + "in-cluster checked when config is not equal to default and error is empty": { + clientConfig: &testClientConfig{config: config2, err: ErrEmptyConfig}, + icc: &testICC{}, + + checkedICC: true, + result: config2, + err: ErrEmptyConfig, + }, + + "in-cluster error returned when config is empty": { + clientConfig: &testClientConfig{err: ErrEmptyConfig}, + icc: &testICC{ + possible: true, + testClientConfig: testClientConfig{ + err: err1, + }, + }, + + checkedICC: true, + result: nil, + err: err1, + }, + + "in-cluster config returned when config is empty": { + clientConfig: &testClientConfig{err: ErrEmptyConfig}, + icc: &testICC{ + possible: true, + testClientConfig: testClientConfig{ + config: config2, + }, + }, + + checkedICC: true, + result: config2, + err: nil, + }, + + "in-cluster not checked when standard default is invalid": { + defaultConfig: &DefaultClientConfig, + clientConfig: &testClientConfig{config: config2}, + icc: &testICC{}, + + checkedICC: false, + result: config2, + err: nil, + }, + } + + for name, test := range testCases { + c := &DeferredLoadingClientConfig{icc: test.icc} + c.loader = &ClientConfigLoadingRules{DefaultClientConfig: test.defaultConfig} + c.clientConfig = test.clientConfig + + cfg, err := c.ClientConfig() + if test.icc.called != test.checkedICC { + t.Errorf("%s: unexpected in-cluster-config call %t", name, test.icc.called) + } + if err != test.err || cfg != test.result { + t.Errorf("%s: unexpected result: %v %#v", name, err, cfg) + } + } +} diff --git a/vendor/k8s.io/client-go/1.4/tools/clientcmd/overrides.go b/vendor/k8s.io/client-go/1.4/tools/clientcmd/overrides.go index 54d541a1f2da..b5d88ce463a9 100644 --- a/vendor/k8s.io/client-go/1.4/tools/clientcmd/overrides.go +++ b/vendor/k8s.io/client-go/1.4/tools/clientcmd/overrides.go @@ -33,7 +33,6 @@ type ConfigOverrides struct { ClusterInfo clientcmdapi.Cluster Context clientcmdapi.Context CurrentContext string - Timeout string } // ConfigOverrideFlags holds the flag names to be used for binding command line flags. 
Notice that this structure tightly @@ -43,7 +42,6 @@ type ConfigOverrideFlags struct { ClusterOverrideFlags ClusterOverrideFlags ContextOverrideFlags ContextOverrideFlags CurrentContext FlagInfo - Timeout FlagInfo } // AuthOverrideFlags holds the flag names to be used for binding command line flags for AuthInfo objects @@ -123,7 +121,6 @@ const ( FlagImpersonate = "as" FlagUsername = "username" FlagPassword = "password" - FlagTimeout = "request-timeout" ) // RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing @@ -154,9 +151,7 @@ func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags { AuthOverrideFlags: RecommendedAuthOverrideFlags(prefix), ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix), ContextOverrideFlags: RecommendedContextOverrideFlags(prefix), - - CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"}, - Timeout: FlagInfo{prefix + FlagTimeout, "", "0", "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests."}, + CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"}, } } @@ -195,7 +190,6 @@ func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNam BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags) BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags) flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext) - flagNames.Timeout.BindStringFlag(flags, &overrides.Timeout) } // BindFlags is a convenience method to bind the specified flags to their associated variables diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/types.go b/vendor/k8s.io/kubernetes/pkg/api/v1/types.go index 6b5d4718d2d6..0fb0ced619f8 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/types.go +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/types.go @@ -1242,7 +1242,7 @@ type Container struct { Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` // Pod volumes to mount into the container's filesystem. // Cannot be updated. - VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,rep,name=volumeMounts"` + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go index 0ac4d550a49d..136cb6e8b578 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go @@ -2574,6 +2574,9 @@ func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList { allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...) } + // TODO(freehan): allow user to update loadbalancerSourceRanges + allErrs = append(allErrs, ValidateImmutableField(service.Spec.LoadBalancerSourceRanges, oldService.Spec.LoadBalancerSourceRanges, field.NewPath("spec", "loadBalancerSourceRanges"))...) + allErrs = append(allErrs, ValidateService(service)...) 
return allErrs } diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/validation_test.go b/vendor/k8s.io/kubernetes/pkg/api/validation/validation_test.go index 6e510ec7f3cc..01031fae64da 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/validation_test.go +++ b/vendor/k8s.io/kubernetes/pkg/api/validation/validation_test.go @@ -6388,6 +6388,25 @@ func TestValidateServiceUpdate(t *testing.T) { }, numErrs: 0, }, + { + name: "add loadBalancerSourceRanges", + tweakSvc: func(oldSvc, newSvc *api.Service) { + oldSvc.Spec.Type = api.ServiceTypeLoadBalancer + newSvc.Spec.Type = api.ServiceTypeLoadBalancer + newSvc.Spec.LoadBalancerSourceRanges = []string{"10.0.0.0/8"} + }, + numErrs: 1, + }, + { + name: "update loadBalancerSourceRanges", + tweakSvc: func(oldSvc, newSvc *api.Service) { + oldSvc.Spec.Type = api.ServiceTypeLoadBalancer + oldSvc.Spec.LoadBalancerSourceRanges = []string{"10.0.0.0/8"} + newSvc.Spec.Type = api.ServiceTypeLoadBalancer + newSvc.Spec.LoadBalancerSourceRanges = []string{"10.180.0.0/16"} + }, + numErrs: 1, + }, } for _, tc := range testCases { diff --git a/vendor/k8s.io/kubernetes/pkg/apiserver/audit/audit.go b/vendor/k8s.io/kubernetes/pkg/apiserver/audit/audit.go index 54de1ea7ae6c..b7224b4346b3 100644 --- a/vendor/k8s.io/kubernetes/pkg/apiserver/audit/audit.go +++ b/vendor/k8s.io/kubernetes/pkg/apiserver/audit/audit.go @@ -25,6 +25,7 @@ import ( "strings" "time" + "github.com/golang/glog" "github.com/pborman/uuid" authenticationapi "k8s.io/kubernetes/pkg/apis/authentication" @@ -41,7 +42,11 @@ type auditResponseWriter struct { } func (a *auditResponseWriter) WriteHeader(code int) { - fmt.Fprintf(a.out, "%s AUDIT: id=%q response=\"%d\"\n", time.Now().Format(time.RFC3339Nano), a.id, code) + line := fmt.Sprintf("%s AUDIT: id=%q response=\"%d\"\n", time.Now().Format(time.RFC3339Nano), a.id, code) + if _, err := fmt.Fprint(a.out, line); err != nil { + glog.Errorf("Unable to write audit log: %s, the error is: %v", line, err) + } + a.ResponseWriter.WriteHeader(code) } @@ -103,8 +108,11 @@ func WithAudit(handler http.Handler, attributeGetter apiserver.RequestAttributeG } id := uuid.NewRandom().String() - fmt.Fprintf(out, "%s AUDIT: id=%q ip=%q method=%q user=%q as=%q asgroups=%q namespace=%q uri=%q\n", + line := fmt.Sprintf("%s AUDIT: id=%q ip=%q method=%q user=%q as=%q asgroups=%q namespace=%q uri=%q\n", time.Now().Format(time.RFC3339Nano), id, utilnet.GetClientIP(req), req.Method, attribs.GetUser().GetName(), asuser, asgroups, namespace, req.URL) + if _, err := fmt.Fprint(out, line); err != nil { + glog.Errorf("Unable to write audit log: %s, the error is: %v", line, err) + } respWriter := decorateResponseWriter(w, out, id) handler.ServeHTTP(respWriter, req) }) diff --git a/vendor/k8s.io/kubernetes/pkg/auth/handlers/handlers.go b/vendor/k8s.io/kubernetes/pkg/auth/handlers/handlers.go index 0f6e226e9c01..20fced2c0d20 100644 --- a/vendor/k8s.io/kubernetes/pkg/auth/handlers/handlers.go +++ b/vendor/k8s.io/kubernetes/pkg/auth/handlers/handlers.go @@ -43,7 +43,8 @@ func init() { // NewRequestAuthenticator creates an http handler that tries to authenticate the given request as a user, and then // stores any such user found onto the provided context for the request. If authentication fails or returns an error -// the failed handler is used. On success, handler is invoked to serve the request. +// the failed handler is used. On success, "Authorization" header is removed from the request and handler +// is invoked to serve the request. 
func NewRequestAuthenticator(mapper api.RequestContextMapper, auth authenticator.Request, failed http.Handler, handler http.Handler) (http.Handler, error) { return api.NewRequestContextFilter( mapper, @@ -57,6 +58,9 @@ func NewRequestAuthenticator(mapper api.RequestContextMapper, auth authenticator return } + // authorization header is not required anymore in case of a successful authentication. + req.Header.Del("Authorization") + if ctx, ok := mapper.Get(req); ok { mapper.Update(req, api.WithUser(ctx, user)) } diff --git a/vendor/k8s.io/kubernetes/pkg/auth/handlers/handlers_test.go b/vendor/k8s.io/kubernetes/pkg/auth/handlers/handlers_test.go index 1118081207b5..8da2c233f7ef 100644 --- a/vendor/k8s.io/kubernetes/pkg/auth/handlers/handlers_test.go +++ b/vendor/k8s.io/kubernetes/pkg/auth/handlers/handlers_test.go @@ -33,7 +33,10 @@ func TestAuthenticateRequest(t *testing.T) { auth, err := NewRequestAuthenticator( contextMapper, authenticator.RequestFunc(func(req *http.Request) (user.Info, bool, error) { - return &user.DefaultInfo{Name: "user"}, true, nil + if req.Header.Get("Authorization") == "Something" { + return &user.DefaultInfo{Name: "user"}, true, nil + } + return nil, false, errors.New("Authorization header is missing.") }), http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) { t.Errorf("unexpected call to failed") @@ -47,11 +50,14 @@ func TestAuthenticateRequest(t *testing.T) { if user == nil || !ok { t.Errorf("no user stored in context: %#v", ctx) } + if req.Header.Get("Authorization") != "" { + t.Errorf("Authorization header should be removed from request on success: %#v", req) + } close(success) }), ) - auth.ServeHTTP(httptest.NewRecorder(), &http.Request{}) + auth.ServeHTTP(httptest.NewRecorder(), &http.Request{Header: map[string][]string{"Authorization": {"Something"}}}) <-success empty, err := api.IsEmpty(contextMapper) diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go index 1789513221ae..1cfece29b32a 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go @@ -28,7 +28,7 @@ import ( "strings" "time" - "gopkg.in/gcfg.v1" + gcfg "gopkg.in/gcfg.v1" "k8s.io/kubernetes/pkg/api" apiservice "k8s.io/kubernetes/pkg/api/service" @@ -41,13 +41,13 @@ import ( "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" + "cloud.google.com/go/compute/metadata" "github.com/golang/glog" "golang.org/x/oauth2" "golang.org/x/oauth2/google" compute "google.golang.org/api/compute/v1" container "google.golang.org/api/container/v1" "google.golang.org/api/googleapi" - "google.golang.org/cloud/compute/metadata" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go b/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go index b1e786278442..9b2d2414d1b5 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go @@ -25,6 +25,9 @@ import ( "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/runtime" utilruntime "k8s.io/kubernetes/pkg/util/runtime" + "k8s.io/kubernetes/pkg/util/wait" + + "github.com/golang/glog" ) // if you use this, there is one behavior change compared to a standard Informer. 
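The next hunk adds framework.WaitForCacheSync, which the NodeController change further down uses to gate its loops on informer cache sync. A minimal, self-contained usage sketch; startWorkers and alwaysSynced are hypothetical stand-ins, only the WaitForCacheSync call reflects the API added here:

    package main

    import (
        "errors"
        "time"

        "k8s.io/kubernetes/pkg/controller/framework"
        utilruntime "k8s.io/kubernetes/pkg/util/runtime"
        "k8s.io/kubernetes/pkg/util/wait"
    )

    // startWorkers blocks until every informer cache reports HasSynced, or gives
    // up when stopCh closes, before starting the work loop.
    func startWorkers(stopCh <-chan struct{}, synced ...framework.InformerSynced) {
        defer utilruntime.HandleCrash()

        if !framework.WaitForCacheSync(stopCh, synced...) {
            utilruntime.HandleError(errors.New("timed out waiting for caches to sync"))
            return
        }

        go wait.Until(func() { /* process queue items */ }, time.Second, stopCh)
        <-stopCh
    }

    func main() {
        stopCh := make(chan struct{})
        alwaysSynced := func() bool { return true } // stand-in for informer.HasSynced
        go startWorkers(stopCh, alwaysSynced)
        time.Sleep(200 * time.Millisecond)
        close(stopCh)
    }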
@@ -77,6 +80,34 @@ func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resy return sharedIndexInformer } +// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced. +type InformerSynced func() bool + +// syncedPollPeriod controls how often you look at the status of your sync funcs +const syncedPollPeriod = 100 * time.Millisecond + +// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false +// if the contoller should shutdown +func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { + err := wait.PollUntil(syncedPollPeriod, + func() (bool, error) { + for _, syncFunc := range cacheSyncs { + if !syncFunc() { + return false, nil + } + } + return true, nil + }, + stopCh) + if err != nil { + glog.V(2).Infof("stop requested") + return false + } + + glog.V(4).Infof("caches populated") + return true +} + type sharedIndexInformer struct { indexer cache.Indexer controller *Controller diff --git a/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go b/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go index 5cbdf3dadd12..eae990de21f7 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/node/nodecontroller.go @@ -287,9 +287,21 @@ func NewNodeController( } }, DeleteFunc: func(obj interface{}) { - node := obj.(*api.Node) - err := nc.cidrAllocator.ReleaseCIDR(node) - if err != nil { + node, isNode := obj.(*api.Node) + // We can get DeletedFinalStateUnknown instead of *api.Node here and we need to handle that correctly. #34692 + if !isNode { + deletedState, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + glog.Errorf("Received unexpected object: %v", obj) + return + } + node, ok = deletedState.Obj.(*api.Node) + if !ok { + glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj) + return + } + } + if err := nc.cidrAllocator.ReleaseCIDR(node); err != nil { glog.Errorf("Error releasing CIDR: %v", err) } }, @@ -386,94 +398,103 @@ func (nc *NodeController) Run() { go nc.internalPodInformer.Run(wait.NeverStop) } - // Incorporate the results of node status pushed from kubelet to master. - go wait.Until(func() { - if err := nc.monitorNodeStatus(); err != nil { - glog.Errorf("Error monitoring node status: %v", err) - } - }, nc.nodeMonitorPeriod, wait.NeverStop) - - // Managing eviction of nodes: - // 1. when we delete pods off a node, if the node was not empty at the time we then - // queue a termination watcher - // a. If we hit an error, retry deletion - // 2. The terminator loop ensures that pods are eventually cleaned and we never - // terminate a pod in a time period less than nc.maximumGracePeriod. AddedAt - // is the time from which we measure "has this pod been terminating too long", - // after which we will delete the pod with grace period 0 (force delete). - // a. If we hit errors, retry instantly - // b. If there are no pods left terminating, exit - // c. 
If there are pods still terminating, wait for their estimated completion - // before retrying - go wait.Until(func() { - nc.evictorLock.Lock() - defer nc.evictorLock.Unlock() - for k := range nc.zonePodEvictor { - nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) { - obj, exists, err := nc.nodeStore.GetByKey(value.Value) - if err != nil { - glog.Warningf("Failed to get Node %v from the nodeStore: %v", value.Value, err) - } else if !exists { - glog.Warningf("Node %v no longer present in nodeStore!", value.Value) - } else { - node, _ := obj.(*api.Node) - zone := utilnode.GetZoneKey(node) - EvictionsNumber.WithLabelValues(zone).Inc() - } - - nodeUid, _ := value.UID.(string) - remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err)) - return false, 0 - } + go func() { + defer utilruntime.HandleCrash() - if remaining { - nc.zoneTerminationEvictor[k].Add(value.Value, value.UID) - } - return true, 0 - }) + if !framework.WaitForCacheSync(wait.NeverStop, nc.nodeController.HasSynced, nc.podController.HasSynced, nc.daemonSetController.HasSynced) { + utilruntime.HandleError(errors.New("NodeController timed out while waiting for informers to sync...")) + return } - }, nodeEvictionPeriod, wait.NeverStop) - - // TODO: replace with a controller that ensures pods that are terminating complete - // in a particular time period - go wait.Until(func() { - nc.evictorLock.Lock() - defer nc.evictorLock.Unlock() - for k := range nc.zoneTerminationEvictor { - nc.zoneTerminationEvictor[k].Try(func(value TimedValue) (bool, time.Duration) { - nodeUid, _ := value.UID.(string) - completed, remaining, err := terminatePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, value.AddedAt, nc.maximumGracePeriod) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to terminate pods on node %q: %v", value.Value, err)) - return false, 0 - } - if completed { - glog.V(2).Infof("All pods terminated on %s", value.Value) - recordNodeEvent(nc.recorder, value.Value, nodeUid, api.EventTypeNormal, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value)) + // Incorporate the results of node status pushed from kubelet to master. + go wait.Until(func() { + if err := nc.monitorNodeStatus(); err != nil { + glog.Errorf("Error monitoring node status: %v", err) + } + }, nc.nodeMonitorPeriod, wait.NeverStop) + + // Managing eviction of nodes: + // 1. when we delete pods off a node, if the node was not empty at the time we then + // queue a termination watcher + // a. If we hit an error, retry deletion + // 2. The terminator loop ensures that pods are eventually cleaned and we never + // terminate a pod in a time period less than nc.maximumGracePeriod. AddedAt + // is the time from which we measure "has this pod been terminating too long", + // after which we will delete the pod with grace period 0 (force delete). + // a. If we hit errors, retry instantly + // b. If there are no pods left terminating, exit + // c. 
If there are pods still terminating, wait for their estimated completion + // before retrying + go wait.Until(func() { + nc.evictorLock.Lock() + defer nc.evictorLock.Unlock() + for k := range nc.zonePodEvictor { + nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) { + obj, exists, err := nc.nodeStore.GetByKey(value.Value) + if err != nil { + glog.Warningf("Failed to get Node %v from the nodeStore: %v", value.Value, err) + } else if !exists { + glog.Warningf("Node %v no longer present in nodeStore!", value.Value) + } else { + node, _ := obj.(*api.Node) + zone := utilnode.GetZoneKey(node) + EvictionsNumber.WithLabelValues(zone).Inc() + } + + nodeUid, _ := value.UID.(string) + remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err)) + return false, 0 + } + + if remaining { + nc.zoneTerminationEvictor[k].Add(value.Value, value.UID) + } return true, 0 - } + }) + } + }, nodeEvictionPeriod, wait.NeverStop) + + // TODO: replace with a controller that ensures pods that are terminating complete + // in a particular time period + go wait.Until(func() { + nc.evictorLock.Lock() + defer nc.evictorLock.Unlock() + for k := range nc.zoneTerminationEvictor { + nc.zoneTerminationEvictor[k].Try(func(value TimedValue) (bool, time.Duration) { + nodeUid, _ := value.UID.(string) + completed, remaining, err := terminatePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, value.AddedAt, nc.maximumGracePeriod) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to terminate pods on node %q: %v", value.Value, err)) + return false, 0 + } - glog.V(2).Infof("Pods terminating since %s on %q, estimated completion %s", value.AddedAt, value.Value, remaining) - // clamp very short intervals - if remaining < nodeEvictionPeriod { - remaining = nodeEvictionPeriod - } - return false, remaining - }) - } - }, nodeEvictionPeriod, wait.NeverStop) + if completed { + glog.V(2).Infof("All pods terminated on %s", value.Value) + recordNodeEvent(nc.recorder, value.Value, nodeUid, api.EventTypeNormal, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value)) + return true, 0 + } - go wait.Until(func() { - pods, err := nc.podStore.List(labels.Everything()) - if err != nil { - utilruntime.HandleError(err) - return - } - cleanupOrphanedPods(pods, nc.nodeStore.Store, nc.forcefullyDeletePod) - }, 30*time.Second, wait.NeverStop) + glog.V(2).Infof("Pods terminating since %s on %q, estimated completion %s", value.AddedAt, value.Value, remaining) + // clamp very short intervals + if remaining < nodeEvictionPeriod { + remaining = nodeEvictionPeriod + } + return false, remaining + }) + } + }, nodeEvictionPeriod, wait.NeverStop) + + go wait.Until(func() { + pods, err := nc.podStore.List(labels.Everything()) + if err != nil { + utilruntime.HandleError(err) + return + } + cleanupOrphanedPods(pods, nc.nodeStore.Store, nc.forcefullyDeletePod) + }, 30*time.Second, wait.NeverStop) + }() } // monitorNodeStatus verifies node status are constantly updated by kubelet, and if not, diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go index 1b8e3f51991c..df560d6a42a9 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go @@ -49,8 +49,15 @@ const ( 
HpaCustomMetricsTargetAnnotationName = "alpha/target.custom-metrics.podautoscaler.kubernetes.io" HpaCustomMetricsStatusAnnotationName = "alpha/status.custom-metrics.podautoscaler.kubernetes.io" + + scaleUpLimitFactor = 2 + scaleUpLimitMinimum = 4 ) +func calculateScaleUpLimit(currentReplicas int32) int32 { + return int32(math.Max(scaleUpLimitFactor*float64(currentReplicas), scaleUpLimitMinimum)) +} + type HorizontalController struct { scaleNamespacer unversionedextensions.ScalesGetter hpaNamespacer unversionedautoscaling.HorizontalPodAutoscalersGetter @@ -149,7 +156,7 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *autoscaling a.eventRecorder.Event(hpa, api.EventTypeWarning, "InvalidSelector", errMsg) return 0, nil, time.Time{}, fmt.Errorf(errMsg) } - currentUtilization, timestamp, err := a.metricsClient.GetCPUUtilization(hpa.Namespace, selector) + currentUtilization, numRunningPods, timestamp, err := a.metricsClient.GetCPUUtilization(hpa.Namespace, selector) // TODO: what to do on partial errors (like metrics obtained for 75% of pods). if err != nil { @@ -160,11 +167,17 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *autoscaling utilization := int32(*currentUtilization) usageRatio := float64(utilization) / float64(targetUtilization) - if math.Abs(1.0-usageRatio) > tolerance { - return int32(math.Ceil(usageRatio * float64(currentReplicas))), &utilization, timestamp, nil + if math.Abs(1.0-usageRatio) <= tolerance { + return currentReplicas, &utilization, timestamp, nil } - return currentReplicas, &utilization, timestamp, nil + desiredReplicas := math.Ceil(usageRatio * float64(numRunningPods)) + + a.eventRecorder.Eventf(hpa, api.EventTypeNormal, "DesiredReplicasComputed", + "Computed the desired num of replicas: %d, on a base of %d report(s) (avgCPUutil: %d, current replicas: %d)", + int32(desiredReplicas), numRunningPods, utilization, scale.Status.Replicas) + + return int32(desiredReplicas), &utilization, timestamp, nil } // Computes the desired number of replicas based on the CustomMetrics passed in cmAnnotation as json-serialized @@ -327,6 +340,12 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *autoscaling.HorizontalPo if desiredReplicas > hpa.Spec.MaxReplicas { desiredReplicas = hpa.Spec.MaxReplicas } + + // Do not upscale too much to prevent incorrect rapid increase of the number of master replicas caused by + // bogus CPU usage report from heapster/kubelet (like in issue #32304). 
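To make the new scale-up cap concrete: calculateScaleUpLimit bounds each reconcile to max(scaleUpLimitFactor*currentReplicas, scaleUpLimitMinimum). The sketch below reproduces the arithmetic of TestUpscaleCap further down in this diff; only the inlined numbers are taken from that test, the rest is illustrative:

    package main

    import (
        "fmt"
        "math"
    )

    // Mirrors calculateScaleUpLimit from this hunk: never more than double the
    // current replica count per reconcile, but always allow at least 4.
    func calculateScaleUpLimit(currentReplicas int32) int32 {
        return int32(math.Max(2*float64(currentReplicas), 4))
    }

    func main() {
        // Same numbers as TestUpscaleCap: 3 running pods, each requesting 100m CPU
        // against a 10% target, reporting 100/200/300 milli-cores (average 200%).
        currentReplicas := int32(3)
        avgUtilization := 200.0 // percent of request, averaged over running pods
        target := 10.0

        desired := int32(math.Ceil(avgUtilization / target * float64(currentReplicas))) // 60
        if desired > calculateScaleUpLimit(currentReplicas) {
            desired = calculateScaleUpLimit(currentReplicas) // capped to max(2*3, 4) = 6
        }
        fmt.Println(desired) // 6
    }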
+ if desiredReplicas > calculateScaleUpLimit(currentReplicas) { + desiredReplicas = calculateScaleUpLimit(currentReplicas) + } } rescale := shouldScale(hpa, currentReplicas, desiredReplicas, timestamp) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go index 884beec0d267..b0f9d625e19b 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal_test.go @@ -382,8 +382,17 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset { obj := action.(core.CreateAction).GetObject().(*api.Event) if tc.verifyEvents { - assert.Equal(t, "SuccessfulRescale", obj.Reason) - assert.Equal(t, fmt.Sprintf("New size: %d; reason: CPU utilization above target", tc.desiredReplicas), obj.Message) + switch obj.Reason { + case "SuccessfulRescale": + assert.Equal(t, fmt.Sprintf("New size: %d; reason: CPU utilization above target", tc.desiredReplicas), obj.Message) + case "DesiredReplicasComputed": + assert.Equal(t, fmt.Sprintf( + "Computed the desired num of replicas: %d, on a base of %d report(s) (avgCPUutil: %d, current replicas: %d)", + tc.desiredReplicas, len(tc.reportedLevels), + (int64(tc.reportedLevels[0])*100)/tc.reportedCPURequests[0].MilliValue(), tc.initialReplicas), obj.Message) + default: + assert.False(t, true, fmt.Sprintf("Unexpected event: %s / %s", obj.Reason, obj.Message)) + } } tc.eventCreated = true return true, obj, nil @@ -801,6 +810,34 @@ func TestEventNotCreated(t *testing.T) { tc.runTest(t) } +func TestMissingReports(t *testing.T) { + tc := testCase{ + minReplicas: 1, + maxReplicas: 5, + initialReplicas: 4, + desiredReplicas: 2, + CPUTarget: 50, + reportedLevels: []uint64{200}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.2")}, + useMetricsApi: true, + } + tc.runTest(t) +} + +func TestUpscaleCap(t *testing.T) { + tc := testCase{ + minReplicas: 1, + maxReplicas: 100, + initialReplicas: 3, + desiredReplicas: 6, + CPUTarget: 10, + reportedLevels: []uint64{100, 200, 300}, + reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")}, + useMetricsApi: true, + } + tc.runTest(t) +} + // TestComputedToleranceAlgImplementation is a regression test which // back-calculates a minimal percentage for downscaling based on a small percentage // increase in pod utilization which is calibrated against the tolerance value. diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client.go index 0ad555a4ab92..422e986e37aa 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client.go @@ -45,9 +45,9 @@ var heapsterQueryStart = -5 * time.Minute // MetricsClient is an interface for getting metrics for pods. type MetricsClient interface { // GetCPUUtilization returns the average utilization over all pods represented as a percent of requested CPU - // (e.g. 70 means that an average pod uses 70% of the requested CPU) - // and the time of generation of the oldest of utilization reports for pods. - GetCPUUtilization(namespace string, selector labels.Selector) (*int, time.Time, error) + // (e.g. 
70 means that an average pod uses 70% of the requested CPU), + // the number of running pods from which CPU usage was collected and the time of generation of the oldest of utilization reports for pods. + GetCPUUtilization(namespace string, selector labels.Selector) (*int, int, time.Time, error) // GetCustomMetric returns the average value of the given custom metrics from the // pods picked using the namespace and selector passed as arguments. @@ -101,30 +101,30 @@ func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, ser } } -func (h *HeapsterMetricsClient) GetCPUUtilization(namespace string, selector labels.Selector) (*int, time.Time, error) { - avgConsumption, avgRequest, timestamp, err := h.GetCpuConsumptionAndRequestInMillis(namespace, selector) +func (h *HeapsterMetricsClient) GetCPUUtilization(namespace string, selector labels.Selector) (utilization *int, numRunningPods int, timestamp time.Time, err error) { + avgConsumption, avgRequest, numRunningPods, timestamp, err := h.GetCpuConsumptionAndRequestInMillis(namespace, selector) if err != nil { - return nil, time.Time{}, fmt.Errorf("failed to get CPU consumption and request: %v", err) + return nil, 0, time.Time{}, fmt.Errorf("failed to get CPU consumption and request: %v", err) } - utilization := int((avgConsumption * 100) / avgRequest) - return &utilization, timestamp, nil + tmp := int((avgConsumption * 100) / avgRequest) + return &tmp, numRunningPods, timestamp, nil } func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace string, selector labels.Selector) (avgConsumption int64, - avgRequest int64, timestamp time.Time, err error) { + avgRequest int64, numRunningPods int, timestamp time.Time, err error) { podList, err := h.client.Core().Pods(namespace). List(api.ListOptions{LabelSelector: selector}) if err != nil { - return 0, 0, time.Time{}, fmt.Errorf("failed to get pod list: %v", err) + return 0, 0, 0, time.Time{}, fmt.Errorf("failed to get pod list: %v", err) } podNames := map[string]struct{}{} requestSum := int64(0) missing := false for _, pod := range podList.Items { - if pod.Status.Phase == api.PodPending { - // Skip pending pods. + if pod.Status.Phase != api.PodRunning { + // Count only running pods. continue } @@ -138,19 +138,19 @@ func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace st } } if len(podNames) == 0 && len(podList.Items) > 0 { - return 0, 0, time.Time{}, fmt.Errorf("no running pods") + return 0, 0, 0, time.Time{}, fmt.Errorf("no running pods") } if missing || requestSum == 0 { - return 0, 0, time.Time{}, fmt.Errorf("some pods do not have request for cpu") + return 0, 0, 0, time.Time{}, fmt.Errorf("some pods do not have request for cpu") } glog.V(4).Infof("%s %s - sum of CPU requested: %d", namespace, selector, requestSum) - requestAvg := requestSum / int64(len(podList.Items)) + requestAvg := requestSum / int64(len(podNames)) // Consumption is already averaged and in millis. 
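The request-averaging change just above divides by the number of running pods rather than by every listed pod, so Pending and Failed pods no longer drag the average request down. A tiny worked example matching TestCPUPending below; the literal values are copied from that test:

    package main

    import "fmt"

    func main() {
        // Five pods request 1024, 2048, 3072, 200 and 100 cores, but the last two
        // are Pending and Failed, so only the first three count as running.
        runningRequestsMilli := []int64{1024000, 2048000, 3072000}

        var requestSum int64
        for _, r := range runningRequestsMilli {
            requestSum += r
        }
        numRunningPods := len(runningRequestsMilli)

        // Average over running pods (len(podNames)), not len(podList.Items).
        avgRequest := requestSum / int64(numRunningPods)

        fmt.Println(avgRequest, numRunningPods) // 2048000 3, matching desiredRequest and desiredRunningPods in the test
    }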
consumption, timestamp, err := h.getCpuUtilizationForPods(namespace, selector, podNames) if err != nil { - return 0, 0, time.Time{}, err + return 0, 0, 0, time.Time{}, err } - return consumption, requestAvg, timestamp, nil + return consumption, requestAvg, len(podNames), timestamp, nil } func (h *HeapsterMetricsClient) getCpuUtilizationForPods(namespace string, selector labels.Selector, podNames map[string]struct{}) (int64, time.Time, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client_test.go b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client_test.go index 1ac615530e7a..3d3cd12e7b45 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client_test.go @@ -67,7 +67,9 @@ type metricPoint struct { type testCase struct { replicas int desiredValue float64 + desiredRequest *float64 desiredError error + desiredRunningPods int targetResource string targetTimestamp int reportedMetricsPoints [][]metricPoint @@ -94,7 +96,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset { obj := &api.PodList{} for i := 0; i < tc.replicas; i++ { podName := fmt.Sprintf("%s-%d", podNamePrefix, i) - pod := buildPod(namespace, podName, podLabels, api.PodRunning) + pod := buildPod(namespace, podName, podLabels, api.PodRunning, "1024") obj.Items = append(obj.Items, pod) } return true, obj, nil @@ -159,7 +161,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset { return fakeClient } -func buildPod(namespace, podName string, podLabels map[string]string, phase api.PodPhase) api.Pod { +func buildPod(namespace, podName string, podLabels map[string]string, phase api.PodPhase, request string) api.Pod { return api.Pod{ ObjectMeta: api.ObjectMeta{ Name: podName, @@ -171,7 +173,7 @@ func buildPod(namespace, podName string, podLabels map[string]string, phase api. { Resources: api.ResourceRequirements{ Requests: api.ResourceList{ - api.ResourceCPU: resource.MustParse("10"), + api.ResourceCPU: resource.MustParse(request), }, }, }, @@ -183,7 +185,7 @@ func buildPod(namespace, podName string, podLabels map[string]string, phase api. 
} } -func (tc *testCase) verifyResults(t *testing.T, val *float64, timestamp time.Time, err error) { +func (tc *testCase) verifyResults(t *testing.T, val *float64, req *float64, pods int, timestamp time.Time, err error) { if tc.desiredError != nil { assert.Error(t, err) assert.Contains(t, fmt.Sprintf("%v", err), fmt.Sprintf("%v", tc.desiredError)) @@ -193,6 +195,12 @@ func (tc *testCase) verifyResults(t *testing.T, val *float64, timestamp time.Tim assert.NotNil(t, val) assert.True(t, tc.desiredValue-0.001 < *val) assert.True(t, tc.desiredValue+0.001 > *val) + assert.Equal(t, tc.desiredRunningPods, pods) + + if tc.desiredRequest != nil { + assert.True(t, *tc.desiredRequest-0.001 < *req) + assert.True(t, *tc.desiredRequest+0.001 > *req) + } targetTimestamp := fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute) assert.True(t, targetTimestamp.Equal(timestamp)) @@ -202,12 +210,13 @@ func (tc *testCase) runTest(t *testing.T) { testClient := tc.prepareTestClient(t) metricsClient := NewHeapsterMetricsClient(testClient, DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort) if tc.targetResource == "cpu-usage" { - val, _, timestamp, err := metricsClient.GetCpuConsumptionAndRequestInMillis(tc.namespace, tc.selector) + val, req, pods, timestamp, err := metricsClient.GetCpuConsumptionAndRequestInMillis(tc.namespace, tc.selector) fval := float64(val) - tc.verifyResults(t, &fval, timestamp, err) + freq := float64(req) + tc.verifyResults(t, &fval, &freq, pods, timestamp, err) } else { val, timestamp, err := metricsClient.GetCustomMetric(tc.targetResource, tc.namespace, tc.selector) - tc.verifyResults(t, val, timestamp, err) + tc.verifyResults(t, val, nil, 0, timestamp, err) } } @@ -215,6 +224,7 @@ func TestCPU(t *testing.T) { tc := testCase{ replicas: 3, desiredValue: 5000, + desiredRunningPods: 3, targetResource: "cpu-usage", targetTimestamp: 1, reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}}, @@ -224,9 +234,12 @@ func TestCPU(t *testing.T) { } func TestCPUPending(t *testing.T) { + desiredRequest := float64(2048 * 1000) tc := testCase{ - replicas: 4, + replicas: 5, desiredValue: 5000, + desiredRequest: &desiredRequest, + desiredRunningPods: 3, targetResource: "cpu-usage", targetTimestamp: 1, reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}}, @@ -237,12 +250,14 @@ func TestCPUPending(t *testing.T) { namespace := "test-namespace" podNamePrefix := "test-pod" podLabels := map[string]string{"name": podNamePrefix} + podRequest := []string{"1024", "2048", "3072", "200", "100"} for i := 0; i < tc.replicas; i++ { podName := fmt.Sprintf("%s-%d", podNamePrefix, i) - pod := buildPod(namespace, podName, podLabels, api.PodRunning) + pod := buildPod(namespace, podName, podLabels, api.PodRunning, podRequest[i]) tc.podListOverride.Items = append(tc.podListOverride.Items, pod) } tc.podListOverride.Items[3].Status.Phase = api.PodPending + tc.podListOverride.Items[4].Status.Phase = api.PodFailed tc.runTest(t) } @@ -263,7 +278,7 @@ func TestCPUAllPending(t *testing.T) { podLabels := map[string]string{"name": podNamePrefix} for i := 0; i < tc.replicas; i++ { podName := fmt.Sprintf("%s-%d", podNamePrefix, i) - pod := buildPod(namespace, podName, podLabels, api.PodPending) + pod := buildPod(namespace, podName, podLabels, api.PodPending, "2048") tc.podListOverride.Items = append(tc.podListOverride.Items, pod) } tc.runTest(t) @@ -295,7 +310,7 @@ func TestQPSPending(t *testing.T) { podLabels := map[string]string{"name": podNamePrefix} for i := 0; i < tc.replicas; i++ 
{ podName := fmt.Sprintf("%s-%d", podNamePrefix, i) - pod := buildPod(namespace, podName, podLabels, api.PodRunning) + pod := buildPod(namespace, podName, podLabels, api.PodRunning, "256") tc.podListOverride.Items = append(tc.podListOverride.Items, pod) } tc.podListOverride.Items[0].Status.Phase = api.PodPending @@ -317,7 +332,7 @@ func TestQPSAllPending(t *testing.T) { podLabels := map[string]string{"name": podNamePrefix} for i := 0; i < tc.replicas; i++ { podName := fmt.Sprintf("%s-%d", podNamePrefix, i) - pod := buildPod(namespace, podName, podLabels, api.PodPending) + pod := buildPod(namespace, podName, podLabels, api.PodPending, "512") tc.podListOverride.Items = append(tc.podListOverride.Items, pod) } tc.podListOverride.Items[0].Status.Phase = api.PodPending @@ -328,6 +343,7 @@ func TestCPUSumEqualZero(t *testing.T) { tc := testCase{ replicas: 3, desiredValue: 0, + desiredRunningPods: 3, targetResource: "cpu-usage", targetTimestamp: 0, reportedPodMetrics: [][]int64{{0}, {0}, {0}}, @@ -351,6 +367,7 @@ func TestCPUMoreMetrics(t *testing.T) { tc := testCase{ replicas: 5, desiredValue: 5000, + desiredRunningPods: 5, targetResource: "cpu-usage", targetTimestamp: 10, reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}}, diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go index 55bff28d2db6..4f1a0bdb374b 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go @@ -66,12 +66,12 @@ type ActualStateOfWorld interface { // the specified volume, an error is returned. SetVolumeMountedByNode(volumeName api.UniqueVolumeName, nodeName string, mounted bool) error - // ResetNodeStatusUpdateNeeded resets statusUpdateNeeded for the specified - // node to false indicating the AttachedVolume field of the Node's Status - // object has been updated. - // If no node with the name nodeName exists in list of attached nodes for - // the specified volume, an error is returned. - ResetNodeStatusUpdateNeeded(nodeName string) error + // SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified + // node to true indicating the AttachedVolume field in the Node's Status + // object needs to be updated by the node updater again. + // If the specifed node does not exist in the nodesToUpdateStatusFor list, + // log the error and return + SetNodeStatusUpdateNeeded(nodeName string) // ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach // request any more for the volume @@ -278,8 +278,17 @@ func (asw *actualStateOfWorld) AddVolumeNode( nodesAttachedTo: make(map[string]nodeAttachedTo), devicePath: devicePath, } - asw.attachedVolumes[volumeName] = volumeObj + } else { + // If volume object already exists, it indicates that the information would be out of date. + // Update the fields for volume object except the nodes attached to the volumes. 
+ volumeObj.devicePath = devicePath + volumeObj.spec = volumeSpec + glog.V(2).Infof("Volume %q is already added to attachedVolume list to node %q, update device path %q", + volumeName, + nodeName, + devicePath) } + asw.attachedVolumes[volumeName] = volumeObj _, nodeExists := volumeObj.nodesAttachedTo[nodeName] if !nodeExists { @@ -322,7 +331,7 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode( nodeObj.mountedByNode = mounted volumeObj.nodesAttachedTo[nodeName] = nodeObj - glog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %q", + glog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %t", volumeName, nodeName, mounted) @@ -433,21 +442,28 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached( } } -func (asw *actualStateOfWorld) ResetNodeStatusUpdateNeeded( - nodeName string) error { - asw.Lock() - defer asw.Unlock() - // Remove volume from volumes to report as attached +// Update the flag statusUpdateNeeded to indicate whether node status is already updated or +// needs to be updated again by the node status updater. +// If the specifed node does not exist in the nodesToUpdateStatusFor list, log the error and return +// This is an internal function and caller should acquire and release the lock +func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName string, needed bool) { nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName] if !nodeToUpdateExists { - return fmt.Errorf( - "failed to ResetNodeStatusUpdateNeeded(nodeName=%q) nodeName does not exist", + // should not happen + glog.Errorf( + "Failed to set statusUpdateNeeded to needed %t because nodeName=%q does not exist", + needed, nodeName) } - nodeToUpdate.statusUpdateNeeded = false + nodeToUpdate.statusUpdateNeeded = needed asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate - return nil +} + +func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName string) { + asw.Lock() + defer asw.Unlock() + asw.updateNodeStatusUpdateNeeded(nodeName, true) } func (asw *actualStateOfWorld) DeleteVolumeNode( @@ -529,7 +545,7 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[string][]api.Att defer asw.RUnlock() volumesToReportAttached := make(map[string][]api.AttachedVolume) - for _, nodeToUpdateObj := range asw.nodesToUpdateStatusFor { + for nodeName, nodeToUpdateObj := range asw.nodesToUpdateStatusFor { if nodeToUpdateObj.statusUpdateNeeded { attachedVolumes := make( []api.AttachedVolume, @@ -544,6 +560,10 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[string][]api.Att } volumesToReportAttached[nodeToUpdateObj.nodeName] = attachedVolumes } + // When GetVolumesToReportAttached is called by node status updater, the current status + // of this node will be updated, so set the flag statusUpdateNeeded to false indicating + // the current status is already updated. 
+ asw.updateNodeStatusUpdateNeeded(nodeName, false) } return volumesToReportAttached @@ -557,6 +577,7 @@ func getAttachedVolume( VolumeName: attachedVolume.volumeName, VolumeSpec: attachedVolume.spec, NodeName: nodeAttachedTo.nodeName, + DevicePath: attachedVolume.devicePath, PluginIsAttachable: true, }, MountedByNode: nodeAttachedTo.mountedByNode, diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go index 8379fab83c02..1daf4700a19a 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go @@ -55,7 +55,7 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNode(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Calls AddVolumeNode() twice. Second time use a different node name. @@ -104,8 +104,8 @@ func Test_AddVolumeNode_Positive_ExistingVolumeNewNode(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <2> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node1Name, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node2Name, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node1Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node2Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Calls AddVolumeNode() twice. Uses the same volume and node both times. @@ -148,7 +148,7 @@ func Test_AddVolumeNode_Positive_ExistingVolumeExistingNode(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), nodeName, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Populates data struct with one volume/node entry. 
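The actual_state_of_world.go hunks above change three things in the attach/detach controller cache: re-adding a known volume now refreshes its devicePath and spec instead of being ignored, GetVolumesToReportAttached clears the per-node statusUpdateNeeded flag once the node status updater has picked the list up, and the new SetNodeStatusUpdateNeeded re-arms that flag when a later patch fails. The sketch below is a minimal stand-in for that bookkeeping, not the vendored code: the method names and a few field names come from the hunks, while the simplified structs, the string-keyed maps, and the main function are assumptions for illustration.

package main

import (
	"fmt"
	"sync"
)

// Simplified stand-ins for the cache types; the real structs also carry
// volume specs, mount state, detach timestamps, etc.
type attachedVolume struct {
	devicePath      string
	nodesAttachedTo map[string]bool
}

type nodeToUpdate struct {
	statusUpdateNeeded bool
	volumes            []string
}

type actualStateOfWorld struct {
	sync.Mutex
	attachedVolumes        map[string]*attachedVolume
	nodesToUpdateStatusFor map[string]*nodeToUpdate
}

// AddVolumeNode keeps an existing entry but refreshes its device path,
// mirroring the "update the fields for volume object" branch in the hunk.
func (asw *actualStateOfWorld) AddVolumeNode(volumeName, nodeName, devicePath string) {
	asw.Lock()
	defer asw.Unlock()

	vol, exists := asw.attachedVolumes[volumeName]
	if !exists {
		vol = &attachedVolume{nodesAttachedTo: map[string]bool{}}
		asw.attachedVolumes[volumeName] = vol
	}
	vol.devicePath = devicePath // refreshed even when the volume was already known

	upd, ok := asw.nodesToUpdateStatusFor[nodeName]
	if !ok {
		upd = &nodeToUpdate{}
		asw.nodesToUpdateStatusFor[nodeName] = upd
	}
	if !vol.nodesAttachedTo[nodeName] {
		vol.nodesAttachedTo[nodeName] = true
		upd.volumes = append(upd.volumes, volumeName)
	}
	upd.statusUpdateNeeded = true
}

// GetVolumesToReportAttached hands back the per-node lists and, as in the
// hunk, clears statusUpdateNeeded because the caller is about to patch the
// node status.
func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[string][]string {
	asw.Lock()
	defer asw.Unlock()

	out := map[string][]string{}
	for nodeName, upd := range asw.nodesToUpdateStatusFor {
		if upd.statusUpdateNeeded {
			out[nodeName] = append([]string(nil), upd.volumes...)
		}
		upd.statusUpdateNeeded = false
	}
	return out
}

// SetNodeStatusUpdateNeeded re-arms the flag, e.g. after a failed patch.
func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName string) {
	asw.Lock()
	defer asw.Unlock()
	if upd, ok := asw.nodesToUpdateStatusFor[nodeName]; ok {
		upd.statusUpdateNeeded = true
	}
}

func main() {
	asw := &actualStateOfWorld{
		attachedVolumes:        map[string]*attachedVolume{},
		nodesToUpdateStatusFor: map[string]*nodeToUpdate{},
	}
	asw.AddVolumeNode("vol-1", "node-1", "fake/device/path1")
	asw.AddVolumeNode("vol-1", "node-1", "fake/device/path2") // device path refreshed in place

	fmt.Println(asw.GetVolumesToReportAttached()) // map[node-1:[vol-1]]
	fmt.Println(asw.GetVolumesToReportAttached()) // map[] - flag was cleared
	asw.SetNodeStatusUpdateNeeded("node-1")       // e.g. the PatchStatus call failed
	fmt.Println(asw.GetVolumesToReportAttached()) // map[node-1:[vol-1]] again
}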
@@ -253,7 +253,7 @@ func Test_DeleteVolumeNode_Positive_TwoNodesOneDeleted(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node2Name, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node2Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Populates data struct with one volume/node entry. @@ -285,7 +285,7 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeExists(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Populates data struct with one volume1/node1 entry. @@ -318,7 +318,7 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), node1Name, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), node1Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Calls VolumeNodeExists() on empty data struct. @@ -384,7 +384,7 @@ func Test_GetAttachedVolumes_Positive_OneVolumeOneNode(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Populates data struct with two volume/node entries (different node and volume). @@ -418,8 +418,8 @@ func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <2> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volume1Name), node1Name, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName2, string(volume2Name), node2Name, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volume1Name), node1Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName2, string(volume2Name), node2Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Populates data struct with two volume/node entries (same volume different node). 
@@ -458,8 +458,8 @@ func Test_GetAttachedVolumes_Positive_OneVolumeTwoNodes(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <2> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node1Name, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node2Name, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node1Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node2Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Populates data struct with one volume/node entry. @@ -485,7 +485,7 @@ func Test_SetVolumeMountedByNode_Positive_Set(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Populates data struct with one volume/node entry. @@ -521,7 +521,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSet(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, false /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, false /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Populates data struct with one volume/node entry. @@ -553,7 +553,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Populates data struct with one volume/node entry. @@ -594,7 +594,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetAddVolumeNodeNotRes t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, false /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, false /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Populates data struct with one volume/node entry. 
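Looping back to the podautoscaler hunks at the top of this section: GetCPUUtilization now also reports how many running pods the utilization was averaged over, and horizontal.go caps the computed replica count with calculateScaleUpLimit. The sketch below reconstructs how those pieces plausibly combine from the shapes of TestMissingReports and TestUpscaleCap above; it is not the vendored algorithm, and the scale-up-limit formula (double the current replicas, with a floor of 4) is an assumption.

package main

import (
	"fmt"
	"math"
)

// Assumed formula: the diff applies calculateScaleUpLimit but does not show
// its body; doubling with a floor of 4 reproduces the capped value (6) that
// TestUpscaleCap expects for 3 current replicas.
func calculateScaleUpLimit(currentReplicas int32) int32 {
	return int32(math.Max(2*float64(currentReplicas), 4))
}

// desiredReplicas mirrors the control flow shown in the horizontal.go hunk:
// scale the number of reporting (running) pods by utilization/target, then
// cap the result so a handful of hot pods cannot trigger an unbounded jump.
func desiredReplicas(avgUtilization, targetUtilization, numRunningPods, currentReplicas int32) int32 {
	desired := int32(math.Ceil(float64(avgUtilization) / float64(targetUtilization) * float64(numRunningPods)))
	if limit := calculateScaleUpLimit(currentReplicas); desired > limit {
		desired = limit
	}
	return desired
}

func main() {
	// Shape of TestMissingReports: 4 replicas but only 1 reporting, at 100%
	// of a 50% target -> scale on the single reporting pod, giving 2.
	fmt.Println(desiredReplicas(100, 50, 1, 4)) // 2

	// Shape of TestUpscaleCap: 3 replicas at 200% of a 10% target would ask
	// for 60 replicas; the cap keeps the step at 6.
	fmt.Println(desiredReplicas(200, 10, 3, 3)) // 6
}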
@@ -640,7 +640,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetVerifyDetachRequest t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, false /* expectedMountedByNode */, true /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, false /* expectedMountedByNode */, true /* expectNonZeroDetachRequestedTime */) if !expectedDetachRequestedTime.Equal(attachedVolumes[0].DetachRequestedTime) { t.Fatalf("DetachRequestedTime changed. Expected: <%v> Actual: <%v>", expectedDetachRequestedTime, attachedVolumes[0].DetachRequestedTime) } @@ -669,7 +669,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Set(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Populates data struct with one volume/node entry. @@ -704,7 +704,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Marked(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, true /* expectedMountedByNode */, true /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, true /* expectNonZeroDetachRequestedTime */) } // Populates data struct with one volume/node entry. @@ -747,7 +747,7 @@ func Test_MarkDesireToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } // Populates data struct with one volume/node entry. @@ -791,7 +791,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, false /* expectedMountedByNode */, true /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, false /* expectedMountedByNode */, true /* expectNonZeroDetachRequestedTime */) } // Populates data struct with one volume/node entry. @@ -851,7 +851,7 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive( reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached() volumes, exists := reportAsAttachedVolumesMap[nodeName] if !exists { - t.Fatalf("MarkDesireToDetach_UnmarkDesireToDetach failed. 
Expected: Actual: Actual: 0 { t.Fatalf("len(reportAsAttachedVolumes) Expected: <0> Actual: <%v>", len(volumes)) @@ -861,7 +861,7 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive( reportAsAttachedVolumesMap = asw.GetVolumesToReportAttached() volumes, exists = reportAsAttachedVolumesMap[nodeName] if !exists { - t.Fatalf("MarkDesireToDetach_UnmarkDesireToDetach failed. Expected: Actual: Actual: Actual: <%v>", len(volumes)) @@ -871,9 +871,9 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive( // Populates data struct with one volume/node entry. // Calls RemoveVolumeFromReportAsAttached // Calls DeleteVolumeNode -// Calls AddVolumeToReportAsAttached +// Calls AddVolumeNode // Verifyies there is no volume as reported as attached -func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Negative(t *testing.T) { +func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) { // Arrange volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) asw := NewActualStateOfWorld(volumePluginMgr) @@ -894,22 +894,23 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Negative( reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached() volumes, exists := reportAsAttachedVolumesMap[nodeName] if !exists { - t.Fatalf("MarkDesireToDetach_UnmarkDesireToDetach failed. Expected: Actual: Actual: 0 { t.Fatalf("len(reportAsAttachedVolumes) Expected: <0> Actual: <%v>", len(volumes)) } asw.DeleteVolumeNode(generatedVolumeName, nodeName) - asw.AddVolumeToReportAsAttached(generatedVolumeName, nodeName) + + asw.AddVolumeNode(volumeSpec, nodeName, "" /*device path*/) reportAsAttachedVolumesMap = asw.GetVolumesToReportAttached() volumes, exists = reportAsAttachedVolumesMap[nodeName] if !exists { - t.Fatalf("MarkDesireToDetach_UnmarkDesireToDetach failed. Expected: Actual: Actual: 0 { - t.Fatalf("len(reportAsAttachedVolumes) Expected: <0> Actual: <%v>", len(volumes)) + if len(volumes) != 1 { + t.Fatalf("len(reportAsAttachedVolumes) Expected: <1> Actual: <%v>", len(volumes)) } } @@ -949,33 +950,6 @@ func Test_SetDetachRequestTime_Positive(t *testing.T) { } } -func verifyAttachedVolume( - t *testing.T, - attachedVolumes []AttachedVolume, - expectedVolumeName api.UniqueVolumeName, - expectedVolumeSpecName string, - expectedNodeName string, - expectedMountedByNode, - expectNonZeroDetachRequestedTime bool) { - for _, attachedVolume := range attachedVolumes { - if attachedVolume.VolumeName == expectedVolumeName && - attachedVolume.VolumeSpec.Name() == expectedVolumeSpecName && - attachedVolume.NodeName == expectedNodeName && - attachedVolume.MountedByNode == expectedMountedByNode && - attachedVolume.DetachRequestedTime.IsZero() == !expectNonZeroDetachRequestedTime { - return - } - } - - t.Fatalf( - "attachedVolumes (%v) should contain the volume/node combo %q/%q with MountedByNode=%v and NonZeroDetachRequestedTime=%v. 
It does not.", - attachedVolumes, - expectedVolumeName, - expectedNodeName, - expectedMountedByNode, - expectNonZeroDetachRequestedTime) -} - func Test_GetAttachedVolumesForNode_Positive_NoVolumesOrNodes(t *testing.T) { // Arrange volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) @@ -1012,7 +986,7 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeOneNode(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) { @@ -1043,7 +1017,7 @@ func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName2, string(volume2Name), node2Name, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName2, string(volume2Name), node2Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) } func Test_GetAttachedVolumesForNode_Positive_OneVolumeTwoNodes(t *testing.T) { @@ -1079,5 +1053,72 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeTwoNodes(t *testing.T) { t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) } - verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node1Name, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName1, string(volumeName), node1Name, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) +} + +func Test_OneVolumeTwoNodes_TwoDevicePaths(t *testing.T) { + // Arrange + volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) + asw := NewActualStateOfWorld(volumePluginMgr) + volumeName := api.UniqueVolumeName("volume-name") + volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) + node1Name := "node1-name" + devicePath1 := "fake/device/path1" + generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name, devicePath1) + if add1Err != nil { + t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", add1Err) + } + node2Name := "node2-name" + devicePath2 := "fake/device/path2" + generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeSpec, node2Name, devicePath2) + if add2Err != nil { + t.Fatalf("AddVolumeNode failed. 
Expected: Actual: <%v>", add2Err) + } + + if generatedVolumeName1 != generatedVolumeName2 { + t.Fatalf( + "Generated volume names for the same volume should be the same but they are not: %q and %q", + generatedVolumeName1, + generatedVolumeName2) + } + + // Act + attachedVolumes := asw.GetAttachedVolumesForNode(node2Name) + + // Assert + if len(attachedVolumes) != 1 { + t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) + } + + verifyAttachedVolume(t, attachedVolumes, generatedVolumeName2, string(volumeName), node2Name, devicePath2, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) +} + +func verifyAttachedVolume( + t *testing.T, + attachedVolumes []AttachedVolume, + expectedVolumeName api.UniqueVolumeName, + expectedVolumeSpecName string, + expectedNodeName string, + expectedDevicePath string, + expectedMountedByNode, + expectNonZeroDetachRequestedTime bool) { + for _, attachedVolume := range attachedVolumes { + if attachedVolume.VolumeName == expectedVolumeName && + attachedVolume.VolumeSpec.Name() == expectedVolumeSpecName && + attachedVolume.NodeName == expectedNodeName && + attachedVolume.DevicePath == expectedDevicePath && + attachedVolume.MountedByNode == expectedMountedByNode && + attachedVolume.DetachRequestedTime.IsZero() == !expectNonZeroDetachRequestedTime { + return + } + } + + t.Fatalf( + "attachedVolumes (%v) should contain the volume/node combo %q/%q with DevicePath=%q MountedByNode=%v and NonZeroDetachRequestedTime=%v. It does not.", + attachedVolumes, + expectedVolumeName, + expectedNodeName, + expectedDevicePath, + expectedMountedByNode, + expectNonZeroDetachRequestedTime) } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go index 0e1de23a68e9..8b91c8774b5f 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go @@ -28,6 +28,7 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" + "k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/util/strategicpatch" ) @@ -62,20 +63,30 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error { for nodeName, attachedVolumes := range nodesToUpdate { nodeObj, exists, err := nsu.nodeInformer.GetStore().GetByKey(nodeName) if nodeObj == nil || !exists || err != nil { - // If node does not exist, its status cannot be updated, log error and move on. - glog.V(5).Infof( + // If node does not exist, its status cannot be updated, log error and + // reset flag statusUpdateNeeded back to true to indicate this node status + // needs to be udpated again + glog.V(2).Infof( "Could not update node status. Failed to find node %q in NodeInformer cache. 
%v", nodeName, err) + nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName) continue } - node, ok := nodeObj.(*api.Node) + clonedNode, err := conversion.NewCloner().DeepCopy(nodeObj) + if err != nil { + return fmt.Errorf("error cloning node %q: %v", + nodeName, + err) + } + + node, ok := clonedNode.(*api.Node) if !ok || node == nil { return fmt.Errorf( "failed to cast %q object %#v to Node", nodeName, - nodeObj) + clonedNode) } oldData, err := json.Marshal(node) @@ -107,24 +118,20 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error { _, err = nsu.kubeClient.Core().Nodes().PatchStatus(nodeName, patchBytes) if err != nil { + // If update node status fails, reset flag statusUpdateNeeded back to true + // to indicate this node status needs to be udpated again + nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName) return fmt.Errorf( "failed to kubeClient.Core().Nodes().Patch for node %q. %v", nodeName, err) } - - err = nsu.actualStateOfWorld.ResetNodeStatusUpdateNeeded(nodeName) - if err != nil { - return fmt.Errorf( - "failed to ResetNodeStatusUpdateNeeded for node %q. %v", - nodeName, - err) - } - - glog.V(3).Infof( - "Updating status for node %q succeeded. patchBytes: %q", + glog.V(2).Infof( + "Updating status for node %q succeeded. patchBytes: %q VolumesAttached: %v", nodeName, - string(patchBytes)) + string(patchBytes), + node.Status.VolumesAttached) + } return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/genericapiserver/genericapiserver.go b/vendor/k8s.io/kubernetes/pkg/genericapiserver/genericapiserver.go index 01e88bda86d3..d966e7843d75 100644 --- a/vendor/k8s.io/kubernetes/pkg/genericapiserver/genericapiserver.go +++ b/vendor/k8s.io/kubernetes/pkg/genericapiserver/genericapiserver.go @@ -501,6 +501,7 @@ func (s *GenericAPIServer) init(c *Config) { attributeGetter := apiserver.NewRequestAttributeGetter(s.RequestContextMapper, s.NewRequestInfoResolver()) handler = apiserver.WithAuthorizationCheck(handler, attributeGetter, s.authorizer) + handler = apiserver.WithImpersonation(handler, s.RequestContextMapper, s.authorizer) if len(c.AuditLogPath) != 0 { // audit handler must comes before the impersonationFilter to read the original user writer := &lumberjack.Logger{ @@ -510,9 +511,7 @@ func (s *GenericAPIServer) init(c *Config) { MaxSize: c.AuditLogMaxSize, } handler = audit.WithAudit(handler, attributeGetter, writer) - defer writer.Close() } - handler = apiserver.WithImpersonation(handler, s.RequestContextMapper, s.authorizer) // Install Authenticator if c.Authenticator != nil { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go index 561244c28112..54bb3cfb60ef 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/helpers_linux.go @@ -25,7 +25,7 @@ import ( // GetCgroupSubsystems returns information about the mounted cgroup subsystems func GetCgroupSubsystems() (*CgroupSubsystems, error) { // get all cgroup mounts. - allCgroups, err := libcontainercgroups.GetCgroupMounts() + allCgroups, err := libcontainercgroups.GetCgroupMounts(true) if err != nil { return &CgroupSubsystems{}, err } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go index 1a435454e49b..14629ebf211e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go @@ -158,9 +158,6 @@ const ( // Period for performing image garbage collection. 
ImageGCPeriod = 5 * time.Minute - // maxImagesInStatus is the number of max images we store in image status. - maxImagesInNodeStatus = 50 - // Minimum number of dead containers to keep in a pod minDeadContainerInPod = 1 ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go index 5e24ac58d9ad..e874392ed7ce 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go @@ -39,6 +39,15 @@ import ( "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) +const ( + // maxImagesInNodeStatus is the number of max images we store in image status. + maxImagesInNodeStatus = 50 + + // maxNamesPerImageInNodeStatus is max number of names per image stored in + // the node status. + maxNamesPerImageInNodeStatus = 5 +) + // registerWithApiServer registers the node with the cluster master. It is safe // to call multiple times, but not concurrently (kl.registrationCompleted is // not locked). @@ -524,8 +533,13 @@ func (kl *Kubelet) setNodeStatusImages(node *api.Node) { } for _, image := range containerImages { + names := append(image.RepoDigests, image.RepoTags...) + // Report up to maxNamesPerImageInNodeStatus names per image. + if len(names) > maxNamesPerImageInNodeStatus { + names = names[0:maxNamesPerImageInNodeStatus] + } imagesOnNode = append(imagesOnNode, api.ContainerImage{ - Names: append(image.RepoTags, image.RepoDigests...), + Names: names, SizeBytes: image.Size, }) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status_test.go index 3287cb6857a4..027e8644e6a2 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status_test.go @@ -44,6 +44,10 @@ import ( "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) +const ( + maxImageTagsForTest = 20 +) + // generateTestingImageList generate randomly generated image list and corresponding expectedImageList. func generateTestingImageList(count int) ([]kubecontainer.Image, []api.ContainerImage) { // imageList is randomly generated image list @@ -64,7 +68,7 @@ func generateTestingImageList(count int) ([]kubecontainer.Image, []api.Container var expectedImageList []api.ContainerImage for _, kubeImage := range imageList { apiImage := api.ContainerImage{ - Names: kubeImage.RepoTags, + Names: kubeImage.RepoTags[0:maxNamesPerImageInNodeStatus], SizeBytes: kubeImage.Size, } @@ -76,7 +80,9 @@ func generateTestingImageList(count int) ([]kubecontainer.Image, []api.Container func generateImageTags() []string { var tagList []string - count := rand.IntnRange(1, maxImageTagsForTest+1) + // Generate > maxNamesPerImageInNodeStatus tags so that the test can verify + // that kubelet report up to maxNamesPerImageInNodeStatus tags. + count := rand.IntnRange(maxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1) for ; count > 0; count-- { tagList = append(tagList, "gcr.io/google_containers:v"+strconv.Itoa(count)) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_test.go index d6b71a5d6394..d1769f86b83f 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_test.go @@ -86,8 +86,6 @@ const ( testReservationCPU = "200m" testReservationMemory = "100M" - maxImageTagsForTest = 3 - // TODO(harry) any global place for these two? 
// Reasonable size range of all container images. 90%ile of images on dockerhub drops into this range. minImgSize int64 = 23 * 1024 * 1024 diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go index 3838b31558fd..a11b711d16b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go @@ -40,6 +40,12 @@ func (kl *Kubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume podVolumes := kl.volumeManager.GetMountedVolumesForPod( volumetypes.UniquePodName(podUID)) for outerVolumeSpecName, volume := range podVolumes { + // TODO: volume.Mounter could be nil if volume object is recovered + // from reconciler's sync state process. PR 33616 will fix this problem + // to create Mounter object when recovering volume state. + if volume.Mounter == nil { + continue + } volumesToReturn[outerVolumeSpecName] = volume.Mounter } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index 5eaa4eb5003f..8fea2d80f8b8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -366,8 +366,15 @@ func (asw *actualStateOfWorld) addVolume( globallyMounted: false, devicePath: devicePath, } - asw.attachedVolumes[volumeName] = volumeObj + } else { + // If volume object already exists, update the fields such as device path + volumeObj.devicePath = devicePath + volumeObj.spec = volumeSpec + glog.V(2).Infof("Volume %q is already added to attachedVolume list, update device path %q", + volumeName, + devicePath) } + asw.attachedVolumes[volumeName] = volumeObj return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go b/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go index e2d6a4866f57..c949de426665 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go +++ b/vendor/k8s.io/kubernetes/pkg/util/wait/wait.go @@ -193,6 +193,11 @@ func PollInfinite(interval time.Duration, condition ConditionFunc) error { return WaitFor(poller(interval, 0), condition, done) } +// PollUntil is like Poll, but it takes a stop change instead of total duration +func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { + return WaitFor(poller(interval, 0), condition, stopCh) +} + // WaitFunc creates a channel that receives an item every time a test // should be executed and is closed when the last test should be invoked. 
type WaitFunc func(done <-chan struct{}) <-chan struct{} diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/gcm.go b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/gcm.go index a60d4519a903..ce981e3e8cc5 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/gcm.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/gcm.go @@ -23,10 +23,10 @@ import ( "k8s.io/kubernetes/pkg/api" + gce "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/google" gcm "google.golang.org/api/cloudmonitoring/v2beta2" - gce "google.golang.org/cloud/compute/metadata" ) const ( diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go index f0dddc4c6794..c316ed220800 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -968,7 +968,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *api.Pod, allPods if err != nil { return nil, err } - if affinity.PodAntiAffinity != nil { + if affinity != nil && affinity.PodAntiAffinity != nil { existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName) if err != nil { return nil, err diff --git a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/cache.go b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/cache.go index 556aea308268..985f9ed0f73d 100644 --- a/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/cache.go +++ b/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache/cache.go @@ -244,12 +244,12 @@ func (cache *schedulerCache) RemovePod(pod *api.Pod) error { cache.mu.Lock() defer cache.mu.Unlock() - _, ok := cache.podStates[key] + cachedstate, ok := cache.podStates[key] switch { // An assumed pod won't have Delete/Remove event. It needs to have Add event // before Remove event, in which case the state would change from Assumed to Added. case ok && !cache.assumedPods[key]: - err := cache.removePod(pod) + err := cache.removePod(cachedstate.pod) if err != nil { return err } diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/kubelet_stats.go b/vendor/k8s.io/kubernetes/test/e2e/framework/kubelet_stats.go index a13ae3d45640..4be8e02121e0 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/kubelet_stats.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/kubelet_stats.go @@ -609,12 +609,12 @@ func (r *resourceCollector) collectStats(oldStatsMap map[string]*stats.Container defer r.lock.Unlock() for _, name := range r.containers { cStats, ok := cStatsMap[name] - if !ok || cStats.CPU == nil { + if !ok { Logf("Missing info/stats for container %q on node %q", name, r.node) return } - if oldStats, ok := oldStatsMap[name]; ok && oldStats.CPU != nil { + if oldStats, ok := oldStatsMap[name]; ok { if oldStats.CPU.Time.Equal(cStats.CPU.Time) { // No change -> skip this stat. continue
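The wait.PollUntil helper added a little earlier in this diff swaps Poll's total-duration argument for a stop channel, so callers can poll until some external event rather than for a fixed window. A small usage sketch follows; it is not part of the vendored change and assumes only the ConditionFunc signature that already exists in the wait package.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})
	// Give up after roughly one second even if the condition never holds.
	go func() {
		time.Sleep(1 * time.Second)
		close(stopCh)
	}()

	start := time.Now()
	err := wait.PollUntil(100*time.Millisecond, func() (bool, error) {
		// Stand-in condition: becomes true after ~300ms.
		return time.Since(start) > 300*time.Millisecond, nil
	}, stopCh)
	fmt.Println("PollUntil returned:", err) // nil here; wait.ErrWaitTimeout if stopCh closes first
}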