From 55350a81748bed81ddbdcb3f9e57bd0e601444af Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian Date: Thu, 29 Aug 2019 16:37:39 -0700 Subject: [PATCH 001/421] Implemented Protobuf --- Gopkg.lock | 2 + Makefile | 1 + api/openapi-spec/swagger.json | 68 +- hack/generate-proto.sh | 125 + pkg/apis/api-rules/violation_exceptions.list | 16 + pkg/apis/workflow/v1alpha1/generated.pb.go | 16255 ++++++++++++++++ pkg/apis/workflow/v1alpha1/generated.proto | 889 + pkg/apis/workflow/v1alpha1/item.go | 176 + pkg/apis/workflow/v1alpha1/item_test.go | 38 + .../workflow/v1alpha1/openapi_generated.go | 120 +- .../v1alpha1/workflow_template_types.go | 12 +- pkg/apis/workflow/v1alpha1/workflow_types.go | 472 +- .../v1alpha1/zz_generated.deepcopy.go | 71 +- workflow/controller/operator.go | 38 +- workflow/controller/operator_test.go | 26 +- workflow/controller/steps.go | 4 +- workflow/util/util.go | 5 +- workflow/validate/validate.go | 13 +- 18 files changed, 17994 insertions(+), 337 deletions(-) create mode 100755 hack/generate-proto.sh create mode 100644 pkg/apis/workflow/v1alpha1/generated.pb.go create mode 100644 pkg/apis/workflow/v1alpha1/generated.proto create mode 100644 pkg/apis/workflow/v1alpha1/item.go create mode 100644 pkg/apis/workflow/v1alpha1/item_test.go diff --git a/Gopkg.lock b/Gopkg.lock index ee51680fedda..e3bd5691f5e9 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1381,6 +1381,8 @@ "github.com/evanphx/json-patch", "github.com/ghodss/yaml", "github.com/go-openapi/spec", + "github.com/gogo/protobuf/proto", + "github.com/gogo/protobuf/sortkeys", "github.com/gorilla/websocket", "github.com/mitchellh/go-ps", "github.com/pkg/errors", diff --git a/Makefile b/Makefile index c238fce4bd2c..86a91482ce55 100644 --- a/Makefile +++ b/Makefile @@ -146,6 +146,7 @@ cover: .PHONY: codegen codegen: + ./hack/generate-proto.sh ./hack/update-codegen.sh ./hack/update-openapigen.sh go run ./hack/gen-openapi-spec/main.go ${VERSION} > ${CURRENT_DIR}/api/openapi-spec/swagger.json diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index e352a4f3e468..6f43a3adc883 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -286,7 +286,7 @@ "depth": { "description": "Depth specifies clones/fetches should be shallow and include the given number of commits from the branch tip", "type": "integer", - "format": "int32" + "format": "int64" }, "fetch": { "description": "Fetch specifies a number of refs that should be fetched before checkout", @@ -486,6 +486,45 @@ "type": "string", "format": "item" }, + "io.argoproj.workflow.v1alpha1.ItemValue": { + "type": "object", + "required": [ + "Type", + "NumVal", + "BoolVal", + "StrVal", + "MapVal", + "ListVal" + ], + "properties": { + "BoolVal": { + "type": "boolean" + }, + "ListVal": { + "type": "array", + "items": { + "type": "string", + "format": "byte" + } + }, + "MapVal": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "NumVal": { + "type": "string" + }, + "StrVal": { + "type": "string" + }, + "Type": { + "type": "integer", + "format": "int32" + } + } + }, "io.argoproj.workflow.v1alpha1.Metadata": { "description": "Pod metdata", "type": "object", @@ -532,6 +571,20 @@ } } }, + "io.argoproj.workflow.v1alpha1.ParallelSteps": { + "type": "object", + "required": [ + "Steps" + ], + "properties": { + "Steps": { + "type": "array", + "items": { + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowStep" + } + } + } + }, "io.argoproj.workflow.v1alpha1.Parameter": { "description": "Parameter indicate 
a passed string parameter to a service template with an optional default value", "type": "object", @@ -989,10 +1042,7 @@ "description": "Steps define a series of sequential/parallel workflow steps", "type": "array", "items": { - "type": "array", - "items": { - "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowStep" - } + "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ParallelSteps" } }, "suspend": { @@ -1203,14 +1253,6 @@ "status" ], "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "type": "string" - }, "metadata": { "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, diff --git a/hack/generate-proto.sh b/hack/generate-proto.sh new file mode 100755 index 000000000000..b48843e5c715 --- /dev/null +++ b/hack/generate-proto.sh @@ -0,0 +1,125 @@ +#!/bin/bash + +# This script auto-generates protobuf related files. It is intended to be run manually when either +# API types are added/modified, or server gRPC calls are added. The generated files should then +# be checked into source control. + +set -x +set -o errexit +set -o nounset +set -o pipefail + +PROJECT_ROOT=$(cd $(dirname ${BASH_SOURCE})/..; pwd) +CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${PROJECT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} +PATH="${PROJECT_ROOT}/dist:${PATH}" + +# protobuf tooling required to build .proto files from go annotations from k8s-like api types +go build -i -o dist/go-to-protobuf ./vendor/k8s.io/code-generator/cmd/go-to-protobuf +go build -i -o dist/protoc-gen-gogo ./vendor/k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo + +cp dist/protoc-gen-gogo $GOPATH/bin + +# Generate pkg/apis/<group>/<version>/(generated.proto,generated.pb.go) +# NOTE: any dependencies of our types to the k8s.io apimachinery types should be added to the +# --apimachinery-packages= option so that go-to-protobuf can locate the types, but prefixed with a +# '-' so that go-to-protobuf will not generate .proto files for it. +PACKAGES=( + github.com/argoproj/argo/pkg/apis/workflow/v1alpha1 +) + +APIMACHINERY_PKGS=( + +k8s.io/apimachinery/pkg/util/intstr + +k8s.io/apimachinery/pkg/api/resource + +k8s.io/apimachinery/pkg/runtime/schema + +k8s.io/apimachinery/pkg/runtime + k8s.io/apimachinery/pkg/apis/meta/v1 + k8s.io/api/core/v1 +) +go-to-protobuf \ --go-header-file=${PROJECT_ROOT}/hack/custom-boilerplate.go.txt \ --packages=$(IFS=, ; echo "${PACKAGES[*]}") \ --apimachinery-packages=$(IFS=, ; echo "${APIMACHINERY_PKGS[*]}") \ --proto-import=./vendor + +# Either protoc-gen-go, protoc-gen-gofast, or protoc-gen-gogofast can be used to build +# server/*/<service>.pb.go from .proto files. golang/protobuf and gogo/protobuf can be used +# interchangeably. The differences in the options are: +# 1. protoc-gen-go - official golang/protobuf +#go build -i -o dist/protoc-gen-go ./vendor/github.com/golang/protobuf/protoc-gen-go +#GOPROTOBINARY=go +# 2. protoc-gen-gofast - fork of golang/protobuf. Faster code generation +#go build -i -o dist/protoc-gen-gofast ./vendor/github.com/gogo/protobuf/protoc-gen-gofast +#GOPROTOBINARY=gofast +# 3. protoc-gen-gogofast - faster code generation and gogo extensions and flexibility in controlling +# the generated go code (e.g. customizing field names, nullable fields) +#go build -i -o dist/protoc-gen-gogofast ./vendor/github.com/gogo/protobuf/protoc-gen-gogofast +#GOPROTOBINARY=gogofast
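For orientation, a minimal sketch of the invocation the commented-out protoc loop below boils down to, assuming GOPROTOBINARY=gogofast; protoc maps a --NAME_out flag to the protoc-gen-NAME binary found on PATH (here, the one built into dist/ above). The input .proto and -I include paths are illustrative assumptions, not part of this patch:

# minimal sketch, assuming dist/ is on PATH (set above); protoc dispatches
# --gogofast_out to the protoc-gen-gogofast binary, and plugins=grpc mirrors
# the ${GOPROTOBINARY}_out flag used in the loop below
protoc \
    -I. \
    -I./vendor \
    --gogofast_out=plugins=grpc:$GOPATH/src \
    pkg/apis/workflow/v1alpha1/generated.proto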
+ +## protoc-gen-grpc-gateway is used to build .pb.gw.go files from .proto files +#go build -i -o dist/protoc-gen-grpc-gateway ./vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway +## protoc-gen-swagger is used to build swagger.json +#go build -i -o dist/protoc-gen-swagger ./vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger + +## Generate server/<service>/(<service>.pb.go|<service>.pb.gw.go) +#PROTO_FILES=$(find $PROJECT_ROOT \( -name "*.proto" -and -path '*/server/*' -or -path '*/reposerver/*' -and -name "*.proto" \)) +#for i in ${PROTO_FILES}; do +# # Path to the google API gateway annotations.proto will be different depending if we are +# # building natively (e.g. from workspace) vs. part of a docker build. +# if [ -f /.dockerenv ]; then +# GOOGLE_PROTO_API_PATH=$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis +# GOGO_PROTOBUF_PATH=$GOPATH/src/github.com/gogo/protobuf +# else +# GOOGLE_PROTO_API_PATH=${PROJECT_ROOT}/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis +# GOGO_PROTOBUF_PATH=${PROJECT_ROOT}/vendor/github.com/gogo/protobuf +# fi +# protoc \ +# -I${PROJECT_ROOT} \ +# -I/usr/local/include \ +# -I./vendor \ +# -I$GOPATH/src \ +# -I${GOOGLE_PROTO_API_PATH} \ +# -I${GOGO_PROTOBUF_PATH} \ +# --${GOPROTOBINARY}_out=plugins=grpc:$GOPATH/src \ +# --grpc-gateway_out=logtostderr=true:$GOPATH/src \ +# --swagger_out=logtostderr=true:. \ +# $i +#done +# +## collect_swagger gathers swagger files into a subdirectory +#collect_swagger() { +# SWAGGER_ROOT="$1" +# EXPECTED_COLLISIONS="$2" +# SWAGGER_OUT="${PROJECT_ROOT}/assets/swagger.json" +# PRIMARY_SWAGGER=`mktemp` +# COMBINED_SWAGGER=`mktemp` +# +# cat <<EOF > "${PRIMARY_SWAGGER}" +#{ +# "swagger": "2.0", +# "info": { +# "title": "Consolidate Services", +# "description": "Description of all APIs", +# "version": "version not set" +# }, +# "paths": {} +#} +#EOF +# +# /bin/rm -f "${SWAGGER_OUT}" +# +# /usr/bin/find "${SWAGGER_ROOT}" -name '*.swagger.json' -exec /usr/local/bin/swagger mixin -c "${EXPECTED_COLLISIONS}" "${PRIMARY_SWAGGER}" '{}' \+ > "${COMBINED_SWAGGER}" +# /usr/local/bin/jq -r 'del(.definitions[].properties[]? | select(."$ref"!=null and .description!=null).description) | del(.definitions[].properties[]?
| select(."$ref"!=null and .title!=null).title)' "${COMBINED_SWAGGER}" > "${SWAGGER_OUT}" +# +# /bin/rm "${PRIMARY_SWAGGER}" "${COMBINED_SWAGGER}" +#} +# +## clean up generated swagger files (should come after collect_swagger) +#clean_swagger() { +# SWAGGER_ROOT="$1" +# /usr/bin/find "${SWAGGER_ROOT}" -name '*.swagger.json' -delete +#} +# +#collect_swagger server 21 +#clean_swagger server +#clean_swagger reposerver +#clean_swagger controller \ No newline at end of file diff --git a/pkg/apis/api-rules/violation_exceptions.list b/pkg/apis/api-rules/violation_exceptions.list index 5fb13360735c..ef0ee9eda0f9 100644 --- a/pkg/apis/api-rules/violation_exceptions.list +++ b/pkg/apis/api-rules/violation_exceptions.list @@ -7,8 +7,11 @@ API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,HDFSConfig,Addresses API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Inputs,Artifacts API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Inputs,Parameters +API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Item,ListVal +API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,ItemValue,ListVal API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Outputs,Artifacts API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Outputs,Parameters +API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,ParallelSteps,Steps API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Template,HostAliases API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Template,InitContainers API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Template,Sidecars @@ -25,3 +28,16 @@ API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,WorkflowStep,WithItems API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,WorkflowTemplateList,Items API rule violation: list_type_missing,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,WorkflowTemplateSpec,Templates +API rule violation: names_match,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Item,BoolVal +API rule violation: names_match,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Item,ListVal +API rule violation: names_match,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Item,MapVal +API rule violation: names_match,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Item,NumVal +API rule violation: names_match,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Item,StrVal +API rule violation: names_match,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,Item,Type +API rule violation: names_match,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,ItemValue,BoolVal +API rule violation: names_match,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,ItemValue,ListVal +API rule violation: names_match,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,ItemValue,MapVal +API rule violation: names_match,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,ItemValue,NumVal +API rule violation: 
names_match,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,ItemValue,StrVal +API rule violation: names_match,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,ItemValue,Type +API rule violation: names_match,github.com/argoproj/argo/pkg/apis/workflow/v1alpha1,ParallelSteps,Steps diff --git a/pkg/apis/workflow/v1alpha1/generated.pb.go b/pkg/apis/workflow/v1alpha1/generated.pb.go new file mode 100644 index 000000000000..c9766bfd6664 --- /dev/null +++ b/pkg/apis/workflow/v1alpha1/generated.pb.go @@ -0,0 +1,16255 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/argoproj/argo/pkg/apis/workflow/v1alpha1/generated.proto + +package v1alpha1 + +import ( + encoding_json "encoding/json" + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + + math "math" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *ArchiveStrategy) Reset() { *m = ArchiveStrategy{} } +func (*ArchiveStrategy) ProtoMessage() {} +func (*ArchiveStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{0} +} +func (m *ArchiveStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ArchiveStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ArchiveStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ArchiveStrategy.Merge(m, src) +} +func (m *ArchiveStrategy) XXX_Size() int { + return m.Size() +} +func (m *ArchiveStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_ArchiveStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_ArchiveStrategy proto.InternalMessageInfo + +func (m *Arguments) Reset() { *m = Arguments{} } +func (*Arguments) ProtoMessage() {} +func (*Arguments) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{1} +} +func (m *Arguments) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Arguments) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Arguments) XXX_Merge(src proto.Message) { + xxx_messageInfo_Arguments.Merge(m, src) +} +func (m *Arguments) XXX_Size() int { + return m.Size() +} +func (m *Arguments) XXX_DiscardUnknown() { + xxx_messageInfo_Arguments.DiscardUnknown(m) +} + +var xxx_messageInfo_Arguments proto.InternalMessageInfo + +func (m *Artifact) Reset() { *m = Artifact{} } +func (*Artifact) ProtoMessage() {} +func (*Artifact) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{2} +} +func (m *Artifact) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Artifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Artifact) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_Artifact.Merge(m, src) +} +func (m *Artifact) XXX_Size() int { + return m.Size() +} +func (m *Artifact) XXX_DiscardUnknown() { + xxx_messageInfo_Artifact.DiscardUnknown(m) +} + +var xxx_messageInfo_Artifact proto.InternalMessageInfo + +func (m *ArtifactLocation) Reset() { *m = ArtifactLocation{} } +func (*ArtifactLocation) ProtoMessage() {} +func (*ArtifactLocation) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{3} +} +func (m *ArtifactLocation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ArtifactLocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ArtifactLocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_ArtifactLocation.Merge(m, src) +} +func (m *ArtifactLocation) XXX_Size() int { + return m.Size() +} +func (m *ArtifactLocation) XXX_DiscardUnknown() { + xxx_messageInfo_ArtifactLocation.DiscardUnknown(m) +} + +var xxx_messageInfo_ArtifactLocation proto.InternalMessageInfo + +func (m *ArtifactRepositoryRef) Reset() { *m = ArtifactRepositoryRef{} } +func (*ArtifactRepositoryRef) ProtoMessage() {} +func (*ArtifactRepositoryRef) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{4} +} +func (m *ArtifactRepositoryRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ArtifactRepositoryRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ArtifactRepositoryRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_ArtifactRepositoryRef.Merge(m, src) +} +func (m *ArtifactRepositoryRef) XXX_Size() int { + return m.Size() +} +func (m *ArtifactRepositoryRef) XXX_DiscardUnknown() { + xxx_messageInfo_ArtifactRepositoryRef.DiscardUnknown(m) +} + +var xxx_messageInfo_ArtifactRepositoryRef proto.InternalMessageInfo + +func (m *ArtifactoryArtifact) Reset() { *m = ArtifactoryArtifact{} } +func (*ArtifactoryArtifact) ProtoMessage() {} +func (*ArtifactoryArtifact) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{5} +} +func (m *ArtifactoryArtifact) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ArtifactoryArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ArtifactoryArtifact) XXX_Merge(src proto.Message) { + xxx_messageInfo_ArtifactoryArtifact.Merge(m, src) +} +func (m *ArtifactoryArtifact) XXX_Size() int { + return m.Size() +} +func (m *ArtifactoryArtifact) XXX_DiscardUnknown() { + xxx_messageInfo_ArtifactoryArtifact.DiscardUnknown(m) +} + +var xxx_messageInfo_ArtifactoryArtifact proto.InternalMessageInfo + +func (m *ArtifactoryAuth) Reset() { *m = ArtifactoryAuth{} } +func (*ArtifactoryAuth) ProtoMessage() {} +func (*ArtifactoryAuth) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{6} +} +func (m *ArtifactoryAuth) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ArtifactoryAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ArtifactoryAuth) XXX_Merge(src proto.Message) { + xxx_messageInfo_ArtifactoryAuth.Merge(m, src) +} +func (m 
*ArtifactoryAuth) XXX_Size() int { + return m.Size() +} +func (m *ArtifactoryAuth) XXX_DiscardUnknown() { + xxx_messageInfo_ArtifactoryAuth.DiscardUnknown(m) +} + +var xxx_messageInfo_ArtifactoryAuth proto.InternalMessageInfo + +func (m *ContinueOn) Reset() { *m = ContinueOn{} } +func (*ContinueOn) ProtoMessage() {} +func (*ContinueOn) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{7} +} +func (m *ContinueOn) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContinueOn) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContinueOn) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContinueOn.Merge(m, src) +} +func (m *ContinueOn) XXX_Size() int { + return m.Size() +} +func (m *ContinueOn) XXX_DiscardUnknown() { + xxx_messageInfo_ContinueOn.DiscardUnknown(m) +} + +var xxx_messageInfo_ContinueOn proto.InternalMessageInfo + +func (m *DAGTask) Reset() { *m = DAGTask{} } +func (*DAGTask) ProtoMessage() {} +func (*DAGTask) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{8} +} +func (m *DAGTask) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DAGTask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DAGTask) XXX_Merge(src proto.Message) { + xxx_messageInfo_DAGTask.Merge(m, src) +} +func (m *DAGTask) XXX_Size() int { + return m.Size() +} +func (m *DAGTask) XXX_DiscardUnknown() { + xxx_messageInfo_DAGTask.DiscardUnknown(m) +} + +var xxx_messageInfo_DAGTask proto.InternalMessageInfo + +func (m *DAGTemplate) Reset() { *m = DAGTemplate{} } +func (*DAGTemplate) ProtoMessage() {} +func (*DAGTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{9} +} +func (m *DAGTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DAGTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DAGTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_DAGTemplate.Merge(m, src) +} +func (m *DAGTemplate) XXX_Size() int { + return m.Size() +} +func (m *DAGTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_DAGTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_DAGTemplate proto.InternalMessageInfo + +func (m *GitArtifact) Reset() { *m = GitArtifact{} } +func (*GitArtifact) ProtoMessage() {} +func (*GitArtifact) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{10} +} +func (m *GitArtifact) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitArtifact) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitArtifact.Merge(m, src) +} +func (m *GitArtifact) XXX_Size() int { + return m.Size() +} +func (m *GitArtifact) XXX_DiscardUnknown() { + xxx_messageInfo_GitArtifact.DiscardUnknown(m) +} + +var xxx_messageInfo_GitArtifact proto.InternalMessageInfo + +func (m *HDFSArtifact) Reset() { *m = HDFSArtifact{} } +func (*HDFSArtifact) ProtoMessage() {} +func (*HDFSArtifact) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{11} +} +func (m 
*HDFSArtifact) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HDFSArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HDFSArtifact) XXX_Merge(src proto.Message) { + xxx_messageInfo_HDFSArtifact.Merge(m, src) +} +func (m *HDFSArtifact) XXX_Size() int { + return m.Size() +} +func (m *HDFSArtifact) XXX_DiscardUnknown() { + xxx_messageInfo_HDFSArtifact.DiscardUnknown(m) +} + +var xxx_messageInfo_HDFSArtifact proto.InternalMessageInfo + +func (m *HDFSConfig) Reset() { *m = HDFSConfig{} } +func (*HDFSConfig) ProtoMessage() {} +func (*HDFSConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{12} +} +func (m *HDFSConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HDFSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HDFSConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_HDFSConfig.Merge(m, src) +} +func (m *HDFSConfig) XXX_Size() int { + return m.Size() +} +func (m *HDFSConfig) XXX_DiscardUnknown() { + xxx_messageInfo_HDFSConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_HDFSConfig proto.InternalMessageInfo + +func (m *HDFSKrbConfig) Reset() { *m = HDFSKrbConfig{} } +func (*HDFSKrbConfig) ProtoMessage() {} +func (*HDFSKrbConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{13} +} +func (m *HDFSKrbConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HDFSKrbConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HDFSKrbConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_HDFSKrbConfig.Merge(m, src) +} +func (m *HDFSKrbConfig) XXX_Size() int { + return m.Size() +} +func (m *HDFSKrbConfig) XXX_DiscardUnknown() { + xxx_messageInfo_HDFSKrbConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_HDFSKrbConfig proto.InternalMessageInfo + +func (m *HTTPArtifact) Reset() { *m = HTTPArtifact{} } +func (*HTTPArtifact) ProtoMessage() {} +func (*HTTPArtifact) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{14} +} +func (m *HTTPArtifact) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPArtifact) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPArtifact.Merge(m, src) +} +func (m *HTTPArtifact) XXX_Size() int { + return m.Size() +} +func (m *HTTPArtifact) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPArtifact.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPArtifact proto.InternalMessageInfo + +func (m *Inputs) Reset() { *m = Inputs{} } +func (*Inputs) ProtoMessage() {} +func (*Inputs) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{15} +} +func (m *Inputs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Inputs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Inputs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Inputs.Merge(m, src) +} +func (m *Inputs) 
XXX_Size() int { + return m.Size() +} +func (m *Inputs) XXX_DiscardUnknown() { + xxx_messageInfo_Inputs.DiscardUnknown(m) +} + +var xxx_messageInfo_Inputs proto.InternalMessageInfo + +func (m *Item) Reset() { *m = Item{} } +func (*Item) ProtoMessage() {} +func (*Item) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{16} +} +func (m *Item) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Item) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Item) XXX_Merge(src proto.Message) { + xxx_messageInfo_Item.Merge(m, src) +} +func (m *Item) XXX_Size() int { + return m.Size() +} +func (m *Item) XXX_DiscardUnknown() { + xxx_messageInfo_Item.DiscardUnknown(m) +} + +var xxx_messageInfo_Item proto.InternalMessageInfo + +func (m *ItemValue) Reset() { *m = ItemValue{} } +func (*ItemValue) ProtoMessage() {} +func (*ItemValue) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{17} +} +func (m *ItemValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ItemValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ItemValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ItemValue.Merge(m, src) +} +func (m *ItemValue) XXX_Size() int { + return m.Size() +} +func (m *ItemValue) XXX_DiscardUnknown() { + xxx_messageInfo_ItemValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ItemValue proto.InternalMessageInfo + +func (m *Metadata) Reset() { *m = Metadata{} } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{18} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) +} +func (m *Metadata) XXX_Size() int { + return m.Size() +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (*NodeStatus) ProtoMessage() {} +func (*NodeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{19} +} +func (m *NodeStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStatus.Merge(m, src) +} +func (m *NodeStatus) XXX_Size() int { + return m.Size() +} +func (m *NodeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeStatus proto.InternalMessageInfo + +func (m *NoneStrategy) Reset() { *m = NoneStrategy{} } +func (*NoneStrategy) ProtoMessage() {} +func (*NoneStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{20} +} +func (m *NoneStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NoneStrategy) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NoneStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_NoneStrategy.Merge(m, src) +} +func (m *NoneStrategy) XXX_Size() int { + return m.Size() +} +func (m *NoneStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_NoneStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_NoneStrategy proto.InternalMessageInfo + +func (m *Outputs) Reset() { *m = Outputs{} } +func (*Outputs) ProtoMessage() {} +func (*Outputs) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{21} +} +func (m *Outputs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Outputs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Outputs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Outputs.Merge(m, src) +} +func (m *Outputs) XXX_Size() int { + return m.Size() +} +func (m *Outputs) XXX_DiscardUnknown() { + xxx_messageInfo_Outputs.DiscardUnknown(m) +} + +var xxx_messageInfo_Outputs proto.InternalMessageInfo + +func (m *ParallelSteps) Reset() { *m = ParallelSteps{} } +func (*ParallelSteps) ProtoMessage() {} +func (*ParallelSteps) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{22} +} +func (m *ParallelSteps) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParallelSteps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ParallelSteps) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParallelSteps.Merge(m, src) +} +func (m *ParallelSteps) XXX_Size() int { + return m.Size() +} +func (m *ParallelSteps) XXX_DiscardUnknown() { + xxx_messageInfo_ParallelSteps.DiscardUnknown(m) +} + +var xxx_messageInfo_ParallelSteps proto.InternalMessageInfo + +func (m *Parameter) Reset() { *m = Parameter{} } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{23} +} +func (m *Parameter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Parameter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Parameter.Merge(m, src) +} +func (m *Parameter) XXX_Size() int { + return m.Size() +} +func (m *Parameter) XXX_DiscardUnknown() { + xxx_messageInfo_Parameter.DiscardUnknown(m) +} + +var xxx_messageInfo_Parameter proto.InternalMessageInfo + +func (m *PodGC) Reset() { *m = PodGC{} } +func (*PodGC) ProtoMessage() {} +func (*PodGC) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{24} +} +func (m *PodGC) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodGC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodGC) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodGC.Merge(m, src) +} +func (m *PodGC) XXX_Size() int { + return m.Size() +} +func (m *PodGC) XXX_DiscardUnknown() { + xxx_messageInfo_PodGC.DiscardUnknown(m) +} + +var xxx_messageInfo_PodGC proto.InternalMessageInfo + +func (m *RawArtifact) Reset() { *m = 
RawArtifact{} } +func (*RawArtifact) ProtoMessage() {} +func (*RawArtifact) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{25} +} +func (m *RawArtifact) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawArtifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RawArtifact) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawArtifact.Merge(m, src) +} +func (m *RawArtifact) XXX_Size() int { + return m.Size() +} +func (m *RawArtifact) XXX_DiscardUnknown() { + xxx_messageInfo_RawArtifact.DiscardUnknown(m) +} + +var xxx_messageInfo_RawArtifact proto.InternalMessageInfo + +func (m *ResourceTemplate) Reset() { *m = ResourceTemplate{} } +func (*ResourceTemplate) ProtoMessage() {} +func (*ResourceTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{26} +} +func (m *ResourceTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceTemplate.Merge(m, src) +} +func (m *ResourceTemplate) XXX_Size() int { + return m.Size() +} +func (m *ResourceTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceTemplate proto.InternalMessageInfo + +func (m *RetryStrategy) Reset() { *m = RetryStrategy{} } +func (*RetryStrategy) ProtoMessage() {} +func (*RetryStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{27} +} +func (m *RetryStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RetryStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RetryStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RetryStrategy.Merge(m, src) +} +func (m *RetryStrategy) XXX_Size() int { + return m.Size() +} +func (m *RetryStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RetryStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_RetryStrategy proto.InternalMessageInfo + +func (m *S3Artifact) Reset() { *m = S3Artifact{} } +func (*S3Artifact) ProtoMessage() {} +func (*S3Artifact) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{28} +} +func (m *S3Artifact) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *S3Artifact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *S3Artifact) XXX_Merge(src proto.Message) { + xxx_messageInfo_S3Artifact.Merge(m, src) +} +func (m *S3Artifact) XXX_Size() int { + return m.Size() +} +func (m *S3Artifact) XXX_DiscardUnknown() { + xxx_messageInfo_S3Artifact.DiscardUnknown(m) +} + +var xxx_messageInfo_S3Artifact proto.InternalMessageInfo + +func (m *S3Bucket) Reset() { *m = S3Bucket{} } +func (*S3Bucket) ProtoMessage() {} +func (*S3Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{29} +} +func (m *S3Bucket) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *S3Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) 
{ + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *S3Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_S3Bucket.Merge(m, src) +} +func (m *S3Bucket) XXX_Size() int { + return m.Size() +} +func (m *S3Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_S3Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_S3Bucket proto.InternalMessageInfo + +func (m *ScriptTemplate) Reset() { *m = ScriptTemplate{} } +func (*ScriptTemplate) ProtoMessage() {} +func (*ScriptTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{30} +} +func (m *ScriptTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScriptTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScriptTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScriptTemplate.Merge(m, src) +} +func (m *ScriptTemplate) XXX_Size() int { + return m.Size() +} +func (m *ScriptTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_ScriptTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_ScriptTemplate proto.InternalMessageInfo + +func (m *Sequence) Reset() { *m = Sequence{} } +func (*Sequence) ProtoMessage() {} +func (*Sequence) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{31} +} +func (m *Sequence) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sequence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Sequence) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sequence.Merge(m, src) +} +func (m *Sequence) XXX_Size() int { + return m.Size() +} +func (m *Sequence) XXX_DiscardUnknown() { + xxx_messageInfo_Sequence.DiscardUnknown(m) +} + +var xxx_messageInfo_Sequence proto.InternalMessageInfo + +func (m *SuspendTemplate) Reset() { *m = SuspendTemplate{} } +func (*SuspendTemplate) ProtoMessage() {} +func (*SuspendTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{32} +} +func (m *SuspendTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SuspendTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SuspendTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_SuspendTemplate.Merge(m, src) +} +func (m *SuspendTemplate) XXX_Size() int { + return m.Size() +} +func (m *SuspendTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_SuspendTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_SuspendTemplate proto.InternalMessageInfo + +func (m *TarStrategy) Reset() { *m = TarStrategy{} } +func (*TarStrategy) ProtoMessage() {} +func (*TarStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{33} +} +func (m *TarStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TarStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TarStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_TarStrategy.Merge(m, src) +} +func (m *TarStrategy) XXX_Size() int { + return m.Size() +} +func (m *TarStrategy) XXX_DiscardUnknown() { + 
xxx_messageInfo_TarStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_TarStrategy proto.InternalMessageInfo + +func (m *Template) Reset() { *m = Template{} } +func (*Template) ProtoMessage() {} +func (*Template) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{34} +} +func (m *Template) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Template) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Template) XXX_Merge(src proto.Message) { + xxx_messageInfo_Template.Merge(m, src) +} +func (m *Template) XXX_Size() int { + return m.Size() +} +func (m *Template) XXX_DiscardUnknown() { + xxx_messageInfo_Template.DiscardUnknown(m) +} + +var xxx_messageInfo_Template proto.InternalMessageInfo + +func (m *TemplateRef) Reset() { *m = TemplateRef{} } +func (*TemplateRef) ProtoMessage() {} +func (*TemplateRef) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{35} +} +func (m *TemplateRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateRef.Merge(m, src) +} +func (m *TemplateRef) XXX_Size() int { + return m.Size() +} +func (m *TemplateRef) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateRef.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateRef proto.InternalMessageInfo + +func (m *UserContainer) Reset() { *m = UserContainer{} } +func (*UserContainer) ProtoMessage() {} +func (*UserContainer) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{36} +} +func (m *UserContainer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserContainer) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserContainer.Merge(m, src) +} +func (m *UserContainer) XXX_Size() int { + return m.Size() +} +func (m *UserContainer) XXX_DiscardUnknown() { + xxx_messageInfo_UserContainer.DiscardUnknown(m) +} + +var xxx_messageInfo_UserContainer proto.InternalMessageInfo + +func (m *ValueFrom) Reset() { *m = ValueFrom{} } +func (*ValueFrom) ProtoMessage() {} +func (*ValueFrom) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{37} +} +func (m *ValueFrom) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValueFrom) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValueFrom) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValueFrom.Merge(m, src) +} +func (m *ValueFrom) XXX_Size() int { + return m.Size() +} +func (m *ValueFrom) XXX_DiscardUnknown() { + xxx_messageInfo_ValueFrom.DiscardUnknown(m) +} + +var xxx_messageInfo_ValueFrom proto.InternalMessageInfo + +func (m *Workflow) Reset() { *m = Workflow{} } +func (*Workflow) ProtoMessage() {} +func (*Workflow) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{38} +} +func (m *Workflow) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Workflow) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Workflow) XXX_Merge(src proto.Message) { + xxx_messageInfo_Workflow.Merge(m, src) +} +func (m *Workflow) XXX_Size() int { + return m.Size() +} +func (m *Workflow) XXX_DiscardUnknown() { + xxx_messageInfo_Workflow.DiscardUnknown(m) +} + +var xxx_messageInfo_Workflow proto.InternalMessageInfo + +func (m *WorkflowList) Reset() { *m = WorkflowList{} } +func (*WorkflowList) ProtoMessage() {} +func (*WorkflowList) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{39} +} +func (m *WorkflowList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowList) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowList.Merge(m, src) +} +func (m *WorkflowList) XXX_Size() int { + return m.Size() +} +func (m *WorkflowList) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowList.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowList proto.InternalMessageInfo + +func (m *WorkflowSpec) Reset() { *m = WorkflowSpec{} } +func (*WorkflowSpec) ProtoMessage() {} +func (*WorkflowSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{40} +} +func (m *WorkflowSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowSpec.Merge(m, src) +} +func (m *WorkflowSpec) XXX_Size() int { + return m.Size() +} +func (m *WorkflowSpec) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowSpec proto.InternalMessageInfo + +func (m *WorkflowStatus) Reset() { *m = WorkflowStatus{} } +func (*WorkflowStatus) ProtoMessage() {} +func (*WorkflowStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{41} +} +func (m *WorkflowStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowStatus.Merge(m, src) +} +func (m *WorkflowStatus) XXX_Size() int { + return m.Size() +} +func (m *WorkflowStatus) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowStatus proto.InternalMessageInfo + +func (m *WorkflowStep) Reset() { *m = WorkflowStep{} } +func (*WorkflowStep) ProtoMessage() {} +func (*WorkflowStep) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{42} +} +func (m *WorkflowStep) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowStep) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowStep) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowStep.Merge(m, src) +} +func (m *WorkflowStep) XXX_Size() int { + return m.Size() +} +func (m 
*WorkflowStep) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowStep.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowStep proto.InternalMessageInfo + +func (m *WorkflowTemplate) Reset() { *m = WorkflowTemplate{} } +func (*WorkflowTemplate) ProtoMessage() {} +func (*WorkflowTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{43} +} +func (m *WorkflowTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTemplate.Merge(m, src) +} +func (m *WorkflowTemplate) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTemplate proto.InternalMessageInfo + +func (m *WorkflowTemplateList) Reset() { *m = WorkflowTemplateList{} } +func (*WorkflowTemplateList) ProtoMessage() {} +func (*WorkflowTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{44} +} +func (m *WorkflowTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTemplateList.Merge(m, src) +} +func (m *WorkflowTemplateList) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTemplateList proto.InternalMessageInfo + +func (m *WorkflowTemplateSpec) Reset() { *m = WorkflowTemplateSpec{} } +func (*WorkflowTemplateSpec) ProtoMessage() {} +func (*WorkflowTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_c23edafa7e7ea072, []int{45} +} +func (m *WorkflowTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WorkflowTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTemplateSpec.Merge(m, src) +} +func (m *WorkflowTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTemplateSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ArchiveStrategy)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArchiveStrategy") + proto.RegisterType((*Arguments)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Arguments") + proto.RegisterType((*Artifact)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Artifact") + proto.RegisterType((*ArtifactLocation)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactLocation") + proto.RegisterType((*ArtifactRepositoryRef)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactRepositoryRef") + proto.RegisterType((*ArtifactoryArtifact)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactoryArtifact") + 
proto.RegisterType((*ArtifactoryAuth)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ArtifactoryAuth") + proto.RegisterType((*ContinueOn)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ContinueOn") + proto.RegisterType((*DAGTask)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.DAGTask") + proto.RegisterType((*DAGTemplate)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.DAGTemplate") + proto.RegisterType((*GitArtifact)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.GitArtifact") + proto.RegisterType((*HDFSArtifact)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.HDFSArtifact") + proto.RegisterType((*HDFSConfig)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.HDFSConfig") + proto.RegisterType((*HDFSKrbConfig)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.HDFSKrbConfig") + proto.RegisterType((*HTTPArtifact)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.HTTPArtifact") + proto.RegisterType((*Inputs)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Inputs") + proto.RegisterType((*Item)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Item") + proto.RegisterMapType((map[string]ItemValue)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Item.MapValEntry") + proto.RegisterType((*ItemValue)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ItemValue") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ItemValue.MapValEntry") + proto.RegisterType((*Metadata)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Metadata") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Metadata.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Metadata.LabelsEntry") + proto.RegisterType((*NodeStatus)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.NodeStatus") + proto.RegisterType((*NoneStrategy)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.NoneStrategy") + proto.RegisterType((*Outputs)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Outputs") + proto.RegisterType((*ParallelSteps)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ParallelSteps") + proto.RegisterType((*Parameter)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Parameter") + proto.RegisterType((*PodGC)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.PodGC") + proto.RegisterType((*RawArtifact)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.RawArtifact") + proto.RegisterType((*ResourceTemplate)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ResourceTemplate") + proto.RegisterType((*RetryStrategy)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.RetryStrategy") + proto.RegisterType((*S3Artifact)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.S3Artifact") + proto.RegisterType((*S3Bucket)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.S3Bucket") + proto.RegisterType((*ScriptTemplate)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ScriptTemplate") + proto.RegisterType((*Sequence)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Sequence") + proto.RegisterType((*SuspendTemplate)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.SuspendTemplate") + proto.RegisterType((*TarStrategy)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.TarStrategy") + 
proto.RegisterType((*Template)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Template") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Template.NodeSelectorEntry") + proto.RegisterType((*TemplateRef)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.TemplateRef") + proto.RegisterType((*UserContainer)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.UserContainer") + proto.RegisterType((*ValueFrom)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.ValueFrom") + proto.RegisterType((*Workflow)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow") + proto.RegisterType((*WorkflowList)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowList") + proto.RegisterType((*WorkflowSpec)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowSpec") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowSpec.NodeSelectorEntry") + proto.RegisterType((*WorkflowStatus)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowStatus") + proto.RegisterMapType((map[string]NodeStatus)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowStatus.NodesEntry") + proto.RegisterType((*WorkflowStep)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowStep") + proto.RegisterType((*WorkflowTemplate)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowTemplate") + proto.RegisterType((*WorkflowTemplateList)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowTemplateList") + proto.RegisterType((*WorkflowTemplateSpec)(nil), "github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowTemplateSpec") +} + +func init() { + proto.RegisterFile("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1/generated.proto", fileDescriptor_c23edafa7e7ea072) +} + +var fileDescriptor_c23edafa7e7ea072 = []byte{ + // 4449 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4d, 0x6c, 0x1c, 0xc9, + 0x75, 0xd6, 0x90, 0x9c, 0xe1, 0xcc, 0x1b, 0xfe, 0x96, 0x44, 0x69, 0x96, 0xd6, 0x72, 0x94, 0x5e, + 0xac, 0x22, 0x27, 0xbb, 0xc3, 0x95, 0x64, 0x27, 0xeb, 0x75, 0x76, 0xd7, 0x1c, 0xfe, 0x48, 0x14, + 0xc5, 0x9f, 0xbc, 0xa1, 0x24, 0x38, 0x1b, 0xc4, 0x29, 0xf6, 0x14, 0x67, 0x5a, 0x9c, 0xe9, 0xee, + 0xed, 0xea, 0x21, 0x4d, 0xec, 0x21, 0x8b, 0x20, 0xbf, 0x08, 0x0c, 0x24, 0x97, 0xc4, 0x88, 0x2f, + 0x41, 0x10, 0x24, 0x97, 0x1c, 0x72, 0xca, 0x21, 0x07, 0x1f, 0x82, 0x1c, 0x16, 0xb9, 0x64, 0x6f, + 0xf1, 0x21, 0x20, 0xbc, 0xf4, 0x25, 0x41, 0x02, 0xe4, 0x18, 0x40, 0xa7, 0xa0, 0xaa, 0xab, 0xab, + 0x7f, 0x66, 0x68, 0x51, 0x3d, 0x94, 0x12, 0xc3, 0x3e, 0x91, 0xfd, 0xde, 0xab, 0xef, 0x55, 0xbd, + 0xaa, 0x7a, 0xf5, 0xde, 0xab, 0x1a, 0x58, 0x6e, 0x59, 0x7e, 0xbb, 0xb7, 0x57, 0x33, 0x9d, 0xee, + 0x22, 0xf5, 0x5a, 0x8e, 0xeb, 0x39, 0x4f, 0xe5, 0x3f, 0x8b, 0xee, 0x41, 0x6b, 0x91, 0xba, 0x16, + 0x5f, 0x3c, 0x72, 0xbc, 0x83, 0xfd, 0x8e, 0x73, 0xb4, 0x78, 0x78, 0x9b, 0x76, 0xdc, 0x36, 0xbd, + 0xbd, 0xd8, 0x62, 0x36, 0xf3, 0xa8, 0xcf, 0x9a, 0x35, 0xd7, 0x73, 0x7c, 0x87, 0xdc, 0x8d, 0x40, + 0x6a, 0x21, 0x88, 0xfc, 0xa7, 0xe6, 0x1e, 0xb4, 0x6a, 0x02, 0xa4, 0x16, 0x82, 0xd4, 0x42, 0x90, + 0xf9, 0xb7, 0x63, 0x9a, 0x5b, 0x8e, 0x50, 0x28, 0xb0, 0xf6, 0x7a, 0xfb, 0xf2, 0x4b, 0x7e, 0xc8, + 0xff, 0x02, 0x1d, 0xf3, 0xc6, 0xc1, 0xbb, 0xbc, 0x66, 0x39, 0xa2, 0x4b, 0x8b, 0xa6, 0xe3, 0xb1, + 0xc5, 0xc3, 0xbe, 0x7e, 0xcc, 0x7f, 0x25, 0x92, 0xe9, 0x52, 0xb3, 0x6d, 0xd9, 
0xcc, 0x3b, 0x8e, + 0xc6, 0xd1, 0x65, 0x3e, 0x1d, 0xd4, 0x6a, 0xf1, 0xac, 0x56, 0x5e, 0xcf, 0xf6, 0xad, 0x2e, 0xeb, + 0x6b, 0xf0, 0x4b, 0xcf, 0x6b, 0xc0, 0xcd, 0x36, 0xeb, 0xd2, 0x74, 0x3b, 0xe3, 0x5f, 0x72, 0x30, + 0xbd, 0xe4, 0x99, 0x6d, 0xeb, 0x90, 0x35, 0x7c, 0xc1, 0x68, 0x1d, 0x93, 0x8f, 0x60, 0xd4, 0xa7, + 0x5e, 0x25, 0x77, 0x23, 0x77, 0xab, 0x7c, 0xe7, 0x1b, 0xb5, 0x0c, 0x86, 0xac, 0xed, 0x52, 0x2f, + 0x84, 0xab, 0x8f, 0x9f, 0x9e, 0x54, 0x47, 0x77, 0xa9, 0x87, 0x02, 0x95, 0x7c, 0x0b, 0xc6, 0x6c, + 0xc7, 0x66, 0x95, 0x11, 0x89, 0xbe, 0x94, 0x09, 0x7d, 0xcb, 0xb1, 0x75, 0x6f, 0xeb, 0xc5, 0xd3, + 0x93, 0xea, 0x98, 0xa0, 0xa0, 0x04, 0x36, 0xfe, 0x3b, 0x07, 0xa5, 0x25, 0xaf, 0xd5, 0xeb, 0x32, + 0xdb, 0xe7, 0xc4, 0x03, 0x70, 0xa9, 0x47, 0xbb, 0xcc, 0x67, 0x1e, 0xaf, 0xe4, 0x6e, 0x8c, 0xde, + 0x2a, 0xdf, 0xf9, 0x20, 0x93, 0xd2, 0x9d, 0x10, 0xa6, 0x4e, 0x3e, 0x3b, 0xa9, 0x5e, 0x3a, 0x3d, + 0xa9, 0x82, 0x26, 0x71, 0x8c, 0x69, 0x21, 0x36, 0x94, 0xa8, 0xe7, 0x5b, 0xfb, 0xd4, 0xf4, 0x79, + 0x65, 0x44, 0xaa, 0x7c, 0x3f, 0x93, 0xca, 0x25, 0x85, 0x52, 0x9f, 0x55, 0x1a, 0x4b, 0x21, 0x85, + 0x63, 0xa4, 0xc2, 0xf8, 0xcf, 0x51, 0x28, 0x86, 0x0c, 0x72, 0x03, 0xc6, 0x6c, 0xda, 0x65, 0x72, + 0xf6, 0x4a, 0xf5, 0x09, 0xd5, 0x70, 0x6c, 0x8b, 0x76, 0x85, 0x81, 0x68, 0x97, 0x09, 0x09, 0x97, + 0xfa, 0x6d, 0x39, 0x03, 0x31, 0x89, 0x1d, 0xea, 0xb7, 0x51, 0x72, 0xc8, 0x75, 0x18, 0xeb, 0x3a, + 0x4d, 0x56, 0x19, 0xbd, 0x91, 0xbb, 0x95, 0x0f, 0x0c, 0xbc, 0xe9, 0x34, 0x19, 0x4a, 0xaa, 0x68, + 0xbf, 0xef, 0x39, 0xdd, 0xca, 0x58, 0xb2, 0xfd, 0x9a, 0xe7, 0x74, 0x51, 0x72, 0xc8, 0x1f, 0xe5, + 0x60, 0x26, 0xec, 0xde, 0x43, 0xc7, 0xa4, 0xbe, 0xe5, 0xd8, 0x95, 0xbc, 0x9c, 0xf0, 0xd5, 0xa1, + 0x0c, 0x11, 0x82, 0xd5, 0x2b, 0x4a, 0xeb, 0x4c, 0x9a, 0x83, 0x7d, 0x8a, 0xc9, 0x1d, 0x80, 0x56, + 0xc7, 0xd9, 0xa3, 0x1d, 0x61, 0x83, 0x4a, 0x41, 0xf6, 0x5a, 0x4f, 0xe1, 0x3d, 0xcd, 0xc1, 0x98, + 0x14, 0x39, 0x80, 0x71, 0x1a, 0xec, 0x8a, 0xca, 0xb8, 0xec, 0xf7, 0x4a, 0xc6, 0x7e, 0x27, 0x76, + 0x56, 0xbd, 0x7c, 0x7a, 0x52, 0x1d, 0x57, 0x44, 0x0c, 0x35, 0x90, 0xb7, 0xa0, 0xe8, 0xb8, 0xa2, + 0xab, 0xb4, 0x53, 0x29, 0xde, 0xc8, 0xdd, 0x2a, 0xd6, 0x67, 0x54, 0xf7, 0x8a, 0xdb, 0x8a, 0x8e, + 0x5a, 0xc2, 0xf8, 0xb3, 0x3c, 0xf4, 0x8d, 0x9a, 0xdc, 0x86, 0xb2, 0x42, 0x7b, 0xe8, 0xb4, 0xb8, + 0x9c, 0xfc, 0x62, 0x7d, 0xfa, 0xf4, 0xa4, 0x5a, 0x5e, 0x8a, 0xc8, 0x18, 0x97, 0x21, 0x4f, 0x60, + 0x84, 0xdf, 0x55, 0xdb, 0xf0, 0xc3, 0x4c, 0xa3, 0x6b, 0xdc, 0xd5, 0x0b, 0xb4, 0x70, 0x7a, 0x52, + 0x1d, 0x69, 0xdc, 0xc5, 0x11, 0x7e, 0x57, 0xb8, 0x8f, 0x96, 0xe5, 0xcb, 0xc5, 0x93, 0xd5, 0x7d, + 0xdc, 0xb3, 0x7c, 0x0d, 0x2d, 0xdd, 0xc7, 0x3d, 0xcb, 0x47, 0x81, 0x2a, 0xdc, 0x47, 0xdb, 0xf7, + 0x5d, 0xb9, 0xf8, 0xb2, 0xba, 0x8f, 0xfb, 0xbb, 0xbb, 0x3b, 0x1a, 0x5e, 0xae, 0x6e, 0x41, 0x41, + 0x09, 0x4c, 0x3e, 0x11, 0x96, 0x0c, 0x78, 0x8e, 0x77, 0xac, 0x56, 0xed, 0xfd, 0xa1, 0x56, 0xad, + 0xe3, 0x1d, 0x6b, 0x75, 0x6a, 0x4e, 0x34, 0x03, 0xe3, 0xda, 0xe4, 0xe8, 0x9a, 0xfb, 0x5c, 0x2e, + 0xd2, 0xcc, 0xa3, 0x5b, 0x59, 0x6b, 0xa4, 0x46, 0xb7, 0xb2, 0xd6, 0x40, 0x09, 0x2c, 0xe6, 0xc6, + 0xa3, 0x47, 0x6a, 0x4d, 0x67, 0x9b, 0x1b, 0xa4, 0x47, 0xc9, 0xb9, 0x41, 0x7a, 0x84, 0x02, 0xd5, + 0x68, 0xc1, 0x5c, 0xc8, 0x41, 0xe6, 0x3a, 0xdc, 0x92, 0x03, 0x64, 0xfb, 0x64, 0x11, 0x4a, 0xa6, + 0x63, 0xef, 0x5b, 0xad, 0x4d, 0xea, 0x2a, 0xc7, 0xa4, 0x3d, 0xda, 0x72, 0xc8, 0xc0, 0x48, 0x86, + 0xbc, 0x0e, 0xa3, 0x07, 0xec, 0x58, 0x79, 0xa8, 0xb2, 0x12, 0x1d, 0xdd, 0x60, 0xc7, 0x28, 0xe8, + 0xc6, 0xf7, 0x73, 0x70, 0x79, 0x80, 0x71, 0x45, 0xb3, 0x9e, 0xd7, 0x51, 0x1a, 0x74, 0xb3, 0x47, + 0xf8, 
0x10, 0x05, 0x9d, 0xfc, 0x7e, 0x0e, 0xa6, 0x63, 0xd6, 0x5e, 0xea, 0x29, 0x27, 0x98, 0x7d, + 0x77, 0x27, 0xb0, 0xea, 0xd7, 0x94, 0xc6, 0xe9, 0x14, 0x03, 0xd3, 0x5a, 0x8d, 0x7f, 0x95, 0xa7, + 0x6e, 0x82, 0x46, 0x28, 0x4c, 0xf5, 0x38, 0xf3, 0x84, 0x8b, 0x6e, 0x30, 0xd3, 0x63, 0xbe, 0x3a, + 0x80, 0xdf, 0xac, 0x05, 0x47, 0xbb, 0xe8, 0x45, 0x4d, 0x44, 0x19, 0xb5, 0xc3, 0xdb, 0xb5, 0x40, + 0x62, 0x83, 0x1d, 0x37, 0x58, 0x87, 0x09, 0x8c, 0x3a, 0x39, 0x3d, 0xa9, 0x4e, 0x3d, 0x4a, 0x00, + 0x60, 0x0a, 0x50, 0xa8, 0x70, 0x29, 0xe7, 0x47, 0x8e, 0xd7, 0x54, 0x2a, 0x46, 0x5e, 0x58, 0xc5, + 0x4e, 0x02, 0x00, 0x53, 0x80, 0xc6, 0x37, 0x01, 0x96, 0x1d, 0xdb, 0xb7, 0xec, 0x1e, 0xdb, 0xb6, + 0xc9, 0x1b, 0x90, 0x67, 0x9e, 0xe7, 0x78, 0xca, 0x21, 0x4d, 0x2a, 0x03, 0xe5, 0x57, 0x05, 0x11, + 0x03, 0x1e, 0xb9, 0x09, 0x85, 0x7d, 0x6a, 0x75, 0x58, 0x53, 0xf6, 0xa6, 0x58, 0x9f, 0x52, 0x52, + 0x85, 0x35, 0x49, 0x45, 0xc5, 0x35, 0xfe, 0xbc, 0x00, 0xe3, 0x2b, 0x4b, 0xf7, 0x76, 0x29, 0x3f, + 0x38, 0xc7, 0x29, 0xf7, 0x16, 0x14, 0x7d, 0xd6, 0x75, 0x3b, 0xd4, 0x67, 0x6a, 0x1d, 0x69, 0xa7, + 0xba, 0xab, 0xe8, 0xa8, 0x25, 0x88, 0x23, 0x8e, 0x6c, 0x15, 0x33, 0x28, 0xcf, 0xf5, 0x41, 0xc6, + 0x35, 0xa1, 0x50, 0xe2, 0x67, 0xb6, 0x22, 0x61, 0xa4, 0x83, 0x70, 0x28, 0x87, 0xca, 0x91, 0xed, + 0x2b, 0x77, 0x96, 0x31, 0xd6, 0x8a, 0x70, 0x02, 0xf7, 0x12, 0x23, 0x60, 0x5c, 0x0b, 0xf9, 0x0a, + 0x4c, 0x34, 0x99, 0xcb, 0xec, 0x26, 0xb3, 0x4d, 0x8b, 0xf1, 0x4a, 0xfe, 0xc6, 0xa8, 0xb0, 0xcb, + 0xe9, 0x49, 0x75, 0x62, 0x25, 0x46, 0xc7, 0x84, 0x14, 0x79, 0x0a, 0xa5, 0x23, 0xcb, 0x6f, 0xaf, + 0xfb, 0xac, 0x2b, 0x3c, 0x93, 0x08, 0x67, 0xbe, 0x96, 0xa9, 0xa3, 0x02, 0x21, 0x32, 0xcb, 0x93, + 0x10, 0x13, 0x23, 0x78, 0xe1, 0x29, 0xc4, 0x87, 0x0c, 0xac, 0xa4, 0x97, 0x2a, 0x25, 0x1b, 0x48, + 0x06, 0x46, 0x32, 0x84, 0xc3, 0x84, 0xf8, 0x68, 0xb0, 0x8f, 0x7b, 0xcc, 0x36, 0x99, 0x3c, 0x3f, + 0xb3, 0x86, 0x5b, 0x21, 0x48, 0x60, 0x91, 0x27, 0x31, 0x58, 0x4c, 0x28, 0x11, 0xab, 0xef, 0xa8, + 0xcd, 0xec, 0x4a, 0x29, 0xb9, 0xfa, 0x9e, 0xb4, 0x99, 0x8d, 0x92, 0x43, 0x1c, 0x00, 0x53, 0x6f, + 0x83, 0x0a, 0x0c, 0x71, 0xc8, 0x46, 0xbb, 0xa9, 0x3e, 0x25, 0x02, 0x96, 0xe8, 0x1b, 0x63, 0x2a, + 0x8c, 0x7f, 0xcc, 0x41, 0x59, 0x6c, 0x8e, 0x70, 0x41, 0xdf, 0x84, 0x82, 0x4f, 0xbd, 0x96, 0xf2, + 0x22, 0xa5, 0x68, 0x53, 0xed, 0x4a, 0x2a, 0x2a, 0x2e, 0xa1, 0x90, 0xf7, 0x29, 0x3f, 0x08, 0xe3, + 0xd4, 0x5f, 0xc9, 0xd4, 0x47, 0xb5, 0x2b, 0xa3, 0xfd, 0x2d, 0xbe, 0x38, 0x06, 0xc8, 0xe4, 0x16, + 0x14, 0xc5, 0x0e, 0x5e, 0xa3, 0x3c, 0x08, 0x0a, 0x8a, 0xf5, 0x09, 0xb1, 0x0b, 0xd7, 0x14, 0x0d, + 0x35, 0xd7, 0xf8, 0xdb, 0x31, 0x28, 0xc7, 0x8e, 0x7e, 0x61, 0x67, 0x8f, 0xb9, 0x4e, 0x7a, 0x97, + 0x8b, 0xc3, 0x05, 0x25, 0x47, 0xec, 0x72, 0x8f, 0x1d, 0x5a, 0x5c, 0x04, 0x98, 0xa9, 0x5d, 0x8e, + 0x8a, 0x8e, 0x5a, 0x82, 0x54, 0x21, 0xdf, 0x64, 0xae, 0xdf, 0x96, 0xdd, 0x18, 0xab, 0x97, 0x44, + 0x57, 0x57, 0x04, 0x01, 0x03, 0xba, 0x10, 0xd8, 0x67, 0xbe, 0xd9, 0xae, 0x8c, 0xc9, 0x9d, 0x21, + 0x05, 0xd6, 0x04, 0x01, 0x03, 0xfa, 0x00, 0x27, 0x9d, 0x7f, 0xf9, 0x4e, 0xba, 0x70, 0xc1, 0x4e, + 0x9a, 0xb8, 0x70, 0x99, 0xf3, 0xf6, 0x8e, 0x67, 0x1d, 0x52, 0x9f, 0xc9, 0xc6, 0x52, 0xcf, 0xf8, + 0x8b, 0xe8, 0xb9, 0x76, 0x7a, 0x52, 0xbd, 0xdc, 0x68, 0xdc, 0x4f, 0xa3, 0xe0, 0x20, 0x68, 0xd2, + 0x80, 0x39, 0xcb, 0xe6, 0xcc, 0xec, 0x79, 0x6c, 0xbd, 0x65, 0x3b, 0x1e, 0xbb, 0xef, 0x70, 0x01, + 0xa7, 0xe2, 0xdd, 0xd7, 0xd5, 0xa4, 0xcd, 0xad, 0x0f, 0x12, 0xc2, 0xc1, 0x6d, 0x8d, 0x7f, 0xce, + 0xc1, 0x44, 0x3c, 0xda, 0x21, 0x1c, 0xa0, 0xbd, 0xb2, 0xd6, 0x08, 0x42, 0x0a, 0x75, 0x7c, 0x7e, + 0x98, 0x39, 0x88, 0x0a, 0x60, 
0xa2, 0x54, 0x21, 0xa2, 0x61, 0x4c, 0xcd, 0x39, 0xd2, 0xa9, 0x37, + 0x20, 0xbf, 0xef, 0x78, 0x26, 0x53, 0xab, 0x5f, 0xef, 0x92, 0x35, 0x41, 0xc4, 0x80, 0x67, 0xfc, + 0x7b, 0x0e, 0x62, 0x1a, 0xc8, 0x6f, 0xc1, 0xa4, 0xd0, 0xb1, 0xe1, 0xed, 0x25, 0x46, 0x53, 0xcf, + 0x3c, 0x1a, 0x8d, 0x54, 0x9f, 0x53, 0xfa, 0x27, 0x13, 0x64, 0x4c, 0xea, 0x23, 0xbf, 0x08, 0x25, + 0xda, 0x6c, 0x7a, 0x8c, 0x73, 0x16, 0x38, 0x87, 0x52, 0x7d, 0x52, 0x9e, 0x66, 0x21, 0x11, 0x23, + 0xbe, 0xd8, 0x86, 0x22, 0xbc, 0x14, 0x2b, 0x5b, 0x0e, 0x32, 0xb6, 0x0d, 0x85, 0x12, 0x41, 0x47, + 0x2d, 0x61, 0x7c, 0x67, 0x0c, 0x92, 0xba, 0x49, 0x13, 0xa6, 0x0f, 0xbc, 0xbd, 0xe5, 0x65, 0x6a, + 0xb6, 0x33, 0x05, 0x3f, 0x97, 0x45, 0xd4, 0xb5, 0x91, 0x44, 0xc0, 0x34, 0xa4, 0xd2, 0xb2, 0xc1, + 0x8e, 0x7d, 0xba, 0x97, 0x25, 0xfe, 0x09, 0xb5, 0xc4, 0x11, 0x30, 0x0d, 0x49, 0xbe, 0x0a, 0xe5, + 0x03, 0x6f, 0x2f, 0xdc, 0xe4, 0xca, 0x1c, 0x97, 0x95, 0x39, 0xca, 0x1b, 0x11, 0x0b, 0xe3, 0x72, + 0xc2, 0x84, 0x07, 0xde, 0x1e, 0x32, 0xda, 0x09, 0x33, 0x6b, 0x6d, 0xc2, 0x0d, 0x45, 0x47, 0x2d, + 0x41, 0x5c, 0x20, 0x07, 0xa1, 0xf5, 0x74, 0x04, 0xad, 0x7c, 0xd1, 0xad, 0x41, 0xa3, 0xd1, 0x42, + 0xf1, 0x01, 0x5d, 0x3d, 0x3d, 0xa9, 0x92, 0x8d, 0x3e, 0x1c, 0x1c, 0x80, 0x4d, 0xbe, 0x09, 0xd7, + 0x0e, 0xbc, 0xbd, 0x06, 0xf3, 0x0e, 0x2d, 0x93, 0xed, 0x78, 0x96, 0x6d, 0x5a, 0x6e, 0x22, 0xa5, + 0xae, 0xaa, 0xee, 0x5e, 0xdb, 0x18, 0x2c, 0x86, 0x67, 0xb5, 0x37, 0xde, 0x86, 0x89, 0x78, 0x4a, + 0xf6, 0x9c, 0x30, 0xde, 0xf8, 0xaf, 0x1c, 0x14, 0xd6, 0x6d, 0xb7, 0xf7, 0x53, 0x52, 0xdd, 0xf9, + 0xcb, 0x31, 0x18, 0x13, 0xc1, 0x11, 0xb9, 0x05, 0x63, 0xfe, 0xb1, 0x1b, 0xc4, 0xbc, 0xa3, 0xf5, + 0x2b, 0xa1, 0xa3, 0xd9, 0x3d, 0x76, 0xd9, 0x33, 0xf5, 0x17, 0xa5, 0x04, 0xf9, 0x00, 0x0a, 0x76, + 0xaf, 0xfb, 0x98, 0x76, 0x94, 0x53, 0xba, 0x19, 0x1e, 0xfe, 0x5b, 0x92, 0xfa, 0xec, 0xa4, 0x7a, + 0x85, 0xd9, 0xa6, 0xd3, 0xb4, 0xec, 0xd6, 0xe2, 0x53, 0xee, 0xd8, 0xb5, 0xad, 0x5e, 0x77, 0x8f, + 0x79, 0xa8, 0x5a, 0x91, 0x2f, 0xc3, 0xf8, 0x9e, 0xe3, 0x74, 0x04, 0x40, 0xe0, 0xb2, 0xa6, 0x15, + 0xc0, 0x78, 0x3d, 0x20, 0x63, 0xc8, 0x17, 0x71, 0x06, 0xf7, 0x3d, 0x21, 0x39, 0x96, 0x8c, 0x33, + 0x1a, 0x92, 0x8a, 0x8a, 0x4b, 0xba, 0x50, 0xe8, 0x52, 0x57, 0xc8, 0xe5, 0xa5, 0xc9, 0x56, 0x33, + 0x47, 0x90, 0xb5, 0x4d, 0x89, 0xb3, 0x6a, 0xfb, 0xde, 0x71, 0xa4, 0x2e, 0x20, 0xa2, 0x52, 0x42, + 0x2c, 0x18, 0xef, 0x58, 0xdc, 0x17, 0xfa, 0x0a, 0x43, 0xac, 0x0a, 0xa1, 0xef, 0x31, 0xed, 0xf4, + 0x58, 0x64, 0x81, 0x87, 0x01, 0x2c, 0x86, 0xf8, 0xf3, 0xc7, 0x50, 0x8e, 0xf5, 0x88, 0xcc, 0x04, + 0xa9, 0xab, 0x5c, 0xbc, 0x32, 0x5b, 0x25, 0xbb, 0x90, 0x3f, 0x14, 0x18, 0xca, 0xd9, 0x0c, 0xd9, + 0x13, 0x0c, 0xc0, 0xde, 0x1b, 0x79, 0x37, 0xf7, 0x5e, 0xf1, 0xbb, 0x7f, 0x51, 0xbd, 0xf4, 0xe9, + 0xbf, 0xdd, 0xb8, 0x64, 0xfc, 0xd3, 0x28, 0x94, 0xb4, 0xc8, 0x4f, 0xf6, 0x4a, 0xf1, 0x52, 0x2b, + 0xe5, 0xc1, 0x70, 0xf6, 0x3a, 0xd7, 0x72, 0x59, 0x4a, 0x2e, 0x97, 0x89, 0xfa, 0xcf, 0xc7, 0xa6, + 0xfa, 0xd9, 0x49, 0xb5, 0x92, 0x34, 0x02, 0xd2, 0xa3, 0x4d, 0xc6, 0x39, 0x6d, 0xb1, 0x68, 0x19, + 0x7c, 0xed, 0x79, 0xcb, 0xe0, 0x4a, 0x7c, 0x19, 0x94, 0x06, 0x4f, 0xe3, 0xa7, 0xa3, 0x50, 0xdc, + 0x64, 0x3e, 0x6d, 0x52, 0x9f, 0x92, 0xdf, 0xcd, 0x41, 0x99, 0xda, 0xb6, 0xe3, 0xcb, 0x12, 0x5f, + 0xe8, 0xde, 0xb6, 0x32, 0x99, 0x23, 0x04, 0xad, 0x2d, 0x45, 0x80, 0x81, 0x49, 0xf4, 0xc9, 0x14, + 0xe3, 0x60, 0x5c, 0x2f, 0xf9, 0x18, 0x0a, 0x1d, 0xba, 0xc7, 0x3a, 0xa1, 0xb7, 0x5b, 0x1f, 0xae, + 0x07, 0x0f, 0x25, 0x56, 0x6a, 0x3e, 0x02, 0x22, 0x2a, 0x45, 0xf3, 0x1f, 0xc0, 0x4c, 0xba, 0xa3, + 0x2f, 0x62, 0x51, 0x31, 0x19, 0x31, 0x35, 0x2f, 0xd2, 
0xd4, 0xf8, 0xab, 0x22, 0xc0, 0x96, 0xd3, + 0x64, 0x0d, 0x9f, 0xfa, 0x3d, 0x4e, 0xe6, 0x61, 0xc4, 0x6a, 0xaa, 0xa3, 0x08, 0x54, 0x6f, 0x47, + 0xd6, 0x57, 0x70, 0xc4, 0x6a, 0xea, 0x22, 0xc4, 0xc8, 0x99, 0x45, 0x88, 0xaf, 0x42, 0xb9, 0x69, + 0x71, 0xb7, 0x43, 0x8f, 0xb7, 0x06, 0xc4, 0x02, 0x2b, 0x11, 0x0b, 0xe3, 0x72, 0xe4, 0x2d, 0xb5, + 0x7f, 0x83, 0x8d, 0x52, 0x49, 0xed, 0xdf, 0xa2, 0xe8, 0x5e, 0x6c, 0x0f, 0xbf, 0x0b, 0x13, 0x61, + 0x92, 0x2f, 0xb5, 0xe4, 0x65, 0xab, 0x70, 0xd7, 0x4f, 0xec, 0xc6, 0x78, 0x98, 0x90, 0x4c, 0x17, + 0x21, 0x0a, 0xaf, 0xa4, 0x08, 0xf1, 0x0e, 0xe4, 0xdd, 0x36, 0xe5, 0x4c, 0xa5, 0xf7, 0xf3, 0x61, + 0x34, 0xbc, 0x23, 0x88, 0xcf, 0x4e, 0xaa, 0x25, 0x31, 0x3c, 0xf9, 0x81, 0x81, 0x20, 0xb9, 0x03, + 0xb0, 0xe7, 0xf4, 0xec, 0x26, 0xf5, 0x8e, 0xd7, 0x57, 0x64, 0xc6, 0x10, 0x2b, 0xe0, 0xd7, 0x35, + 0x07, 0x63, 0x52, 0xc2, 0x31, 0x75, 0x83, 0x2d, 0xaa, 0xb2, 0x74, 0xed, 0x98, 0xf4, 0xce, 0x55, + 0x7c, 0xf2, 0x11, 0x94, 0xb8, 0x4f, 0x3d, 0x9f, 0x35, 0x97, 0x7c, 0x95, 0xaa, 0xff, 0x42, 0x2c, + 0x84, 0xd2, 0xd7, 0x69, 0xd1, 0xf0, 0xbb, 0xcc, 0xa7, 0x22, 0xa8, 0xda, 0xb5, 0xba, 0x2c, 0x3a, + 0xbd, 0x1b, 0x21, 0x08, 0x46, 0x78, 0xe4, 0x37, 0x00, 0xf6, 0x2d, 0xdb, 0xe2, 0x6d, 0x89, 0x5e, + 0x7e, 0x61, 0x74, 0x3d, 0xce, 0x35, 0x8d, 0x82, 0x31, 0x44, 0x91, 0x5b, 0xb8, 0x4e, 0x73, 0x7d, + 0xa7, 0x32, 0x21, 0x47, 0xa9, 0x73, 0x8b, 0x1d, 0x41, 0xc4, 0x80, 0x27, 0x32, 0xf0, 0x26, 0x65, + 0x5d, 0xc7, 0x66, 0xcd, 0xca, 0x64, 0x94, 0x81, 0xaf, 0x28, 0x1a, 0x6a, 0x2e, 0xf9, 0x16, 0x14, + 0x2c, 0x19, 0x5a, 0x55, 0xa6, 0x64, 0x57, 0xbf, 0x9e, 0xcd, 0xf9, 0x4a, 0x88, 0x3a, 0x88, 0x9d, + 0x1d, 0xfc, 0x8f, 0x0a, 0x96, 0x98, 0x30, 0xee, 0xf4, 0x7c, 0xa9, 0x61, 0x5a, 0x6a, 0xc8, 0x56, + 0x71, 0xd8, 0x0e, 0x30, 0x82, 0x0b, 0x15, 0xf5, 0x81, 0x21, 0xb2, 0x18, 0xaf, 0xd9, 0xb6, 0x3a, + 0x4d, 0x8f, 0xd9, 0x95, 0x19, 0x99, 0xba, 0xc8, 0xf1, 0x2e, 0x2b, 0x1a, 0x6a, 0x2e, 0xf9, 0x65, + 0x98, 0x74, 0x7a, 0xbe, 0x5c, 0x37, 0x62, 0xd9, 0xf1, 0xca, 0xac, 0x14, 0x9f, 0x15, 0xe9, 0xd1, + 0x76, 0x9c, 0x81, 0x49, 0x39, 0x63, 0x0a, 0x26, 0xe2, 0xb7, 0x90, 0xc6, 0x9f, 0x8c, 0x40, 0xd8, + 0x8f, 0x9f, 0x86, 0xa8, 0x94, 0x18, 0x50, 0xf0, 0x18, 0xef, 0x75, 0x7c, 0xe5, 0xd4, 0xe4, 0x5c, + 0xa3, 0xa4, 0xa0, 0xe2, 0x18, 0x47, 0x30, 0x29, 0x7a, 0xdb, 0xe9, 0xb0, 0x4e, 0xc3, 0x67, 0x2e, + 0x27, 0xfb, 0x90, 0xe7, 0xe2, 0x1f, 0x65, 0x93, 0x6c, 0xf7, 0x1b, 0x4f, 0x14, 0x45, 0x40, 0x46, + 0xeb, 0x5d, 0x2a, 0xc0, 0x00, 0xde, 0xf8, 0xd3, 0x11, 0x28, 0x69, 0x3b, 0x9d, 0xa3, 0x56, 0xfc, + 0x26, 0x8c, 0x37, 0xd9, 0x3e, 0x15, 0xa3, 0x51, 0x57, 0x0e, 0x62, 0x59, 0xad, 0x04, 0x24, 0x0c, + 0x79, 0xa4, 0x1a, 0x1e, 0x1a, 0xc1, 0x90, 0x65, 0x75, 0x28, 0x1e, 0x93, 0x91, 0x03, 0x28, 0xc9, + 0x7f, 0xd6, 0xc2, 0xeb, 0xd1, 0xac, 0xf3, 0xfe, 0x38, 0x44, 0x09, 0x72, 0x6e, 0xfd, 0x89, 0x11, + 0x7e, 0xea, 0x5a, 0x33, 0x7f, 0x9e, 0x6b, 0x4d, 0x63, 0x0d, 0x84, 0x63, 0xb8, 0xb7, 0x4c, 0xde, + 0x87, 0x22, 0x57, 0x4b, 0x57, 0xd9, 0xe5, 0xe7, 0xc2, 0x6c, 0x33, 0x5c, 0xd2, 0xcf, 0x4e, 0xaa, + 0x93, 0x52, 0x38, 0x24, 0xa0, 0x6e, 0x62, 0x2c, 0x42, 0x39, 0x76, 0x0d, 0x24, 0x2c, 0x2c, 0x8e, + 0xf6, 0xb4, 0x85, 0x57, 0xa8, 0x4f, 0x51, 0x72, 0x8c, 0x67, 0x23, 0x30, 0x83, 0x8c, 0x3b, 0x3d, + 0xcf, 0x64, 0xf1, 0x1a, 0x25, 0x35, 0xe5, 0xdd, 0x70, 0xaa, 0x46, 0xb9, 0x24, 0xa9, 0xa8, 0xb8, + 0xe4, 0xeb, 0x30, 0xd9, 0x65, 0x5e, 0x4b, 0x6f, 0x36, 0x35, 0x49, 0xba, 0x8e, 0xb1, 0x19, 0x67, + 0x62, 0x52, 0x56, 0xe4, 0xd5, 0x5d, 0x6a, 0x5b, 0xfb, 0x8c, 0xfb, 0xe9, 0xd2, 0xc4, 0xa6, 0xa2, + 0xa3, 0x96, 0x20, 0xf7, 0x60, 0x96, 0x33, 0x7f, 0xfb, 0xc8, 0x66, 0x1e, 0xb2, 
0x7d, 0xe6, 0xc9, + 0x9a, 0xf2, 0x98, 0x74, 0x99, 0xaf, 0xa9, 0x66, 0xb3, 0x8d, 0xb4, 0x00, 0xf6, 0xb7, 0x21, 0x2b, + 0x30, 0xc3, 0x7b, 0xa6, 0xc9, 0x38, 0x5f, 0x76, 0xec, 0xa6, 0xa5, 0x6f, 0xc0, 0xa3, 0xe3, 0x7c, + 0xa6, 0x91, 0xe2, 0x63, 0x5f, 0x0b, 0x81, 0xb2, 0x4f, 0xad, 0x4e, 0xcf, 0x63, 0x11, 0x4a, 0x21, + 0x89, 0xb2, 0x96, 0xe2, 0x63, 0x5f, 0x0b, 0xe3, 0x1d, 0x98, 0x44, 0xe6, 0x7b, 0xc7, 0xda, 0x26, + 0x55, 0xc8, 0x77, 0xac, 0xae, 0x15, 0x14, 0x59, 0xf2, 0xc1, 0x42, 0x7e, 0x28, 0x08, 0x18, 0xd0, + 0x8d, 0xef, 0xe6, 0x00, 0xa2, 0xdb, 0x5d, 0x72, 0x00, 0x45, 0x7e, 0xb7, 0xde, 0x33, 0x0f, 0x74, + 0x5d, 0x26, 0x63, 0x81, 0x5d, 0x81, 0x44, 0x53, 0x10, 0x52, 0x50, 0x2b, 0x78, 0xde, 0xdd, 0xdf, + 0xef, 0x8d, 0x82, 0x6e, 0x25, 0x26, 0x97, 0xd9, 0x4d, 0xd7, 0xb1, 0xec, 0xb0, 0xce, 0xad, 0x91, + 0x57, 0x15, 0x1d, 0xb5, 0x84, 0x58, 0x6f, 0x7b, 0xc1, 0x20, 0x46, 0x92, 0xeb, 0x4d, 0xf5, 0x41, + 0x71, 0x85, 0x9c, 0xc7, 0x5a, 0xc2, 0xd6, 0xa3, 0x49, 0x39, 0x94, 0x54, 0x54, 0x5c, 0x71, 0xcc, + 0x84, 0x85, 0x49, 0xb5, 0x46, 0xe4, 0x31, 0x13, 0xd6, 0x30, 0x51, 0x73, 0x49, 0x1b, 0xa6, 0xa9, + 0x9c, 0xda, 0xa8, 0xd8, 0xfa, 0x42, 0x75, 0xe3, 0xe8, 0x66, 0x31, 0x89, 0x82, 0x69, 0x58, 0xa1, + 0x89, 0x47, 0xcd, 0x5f, 0xbc, 0x7c, 0xac, 0x35, 0x35, 0x92, 0x28, 0x98, 0x86, 0x35, 0xfe, 0x20, + 0x07, 0x53, 0x0d, 0xd3, 0xb3, 0x5c, 0x5f, 0x6f, 0xe8, 0x2d, 0x79, 0xcf, 0xeb, 0x53, 0x11, 0xd0, + 0xa8, 0x85, 0xf2, 0xfa, 0x19, 0xc5, 0xa8, 0x40, 0x28, 0x71, 0x0d, 0x1c, 0x90, 0x30, 0x82, 0x90, + 0x29, 0xa3, 0x74, 0x19, 0xe9, 0x09, 0x6b, 0x48, 0x2a, 0x2a, 0xae, 0xf1, 0xbd, 0x1c, 0x14, 0xf5, + 0xe5, 0xcc, 0x1b, 0x90, 0x37, 0x9d, 0x9e, 0x5e, 0x10, 0xfa, 0x84, 0x58, 0x16, 0x44, 0x0c, 0x78, + 0x42, 0x48, 0xc6, 0x68, 0x0a, 0x38, 0x76, 0x8c, 0x50, 0xcf, 0xc7, 0x80, 0x27, 0x56, 0x22, 0xb3, + 0x9b, 0x6a, 0x11, 0xe8, 0x95, 0xb8, 0x6a, 0x37, 0x51, 0xd0, 0xe5, 0xbd, 0xa5, 0xe3, 0x75, 0xa9, + 0x9f, 0x4e, 0x68, 0xd7, 0x24, 0x15, 0x15, 0xd7, 0x98, 0x85, 0xe9, 0x46, 0x8f, 0xbb, 0xcc, 0x6e, + 0x86, 0x86, 0x32, 0x26, 0xa1, 0x1c, 0x7b, 0x21, 0x65, 0xfc, 0x68, 0x0e, 0xf4, 0xa5, 0xe4, 0xcf, + 0xae, 0x36, 0x33, 0x65, 0x15, 0xa6, 0x0e, 0x5c, 0xf3, 0xc3, 0x07, 0xae, 0x7a, 0x26, 0x53, 0xc1, + 0x6b, 0x2b, 0x0a, 0x5e, 0x0b, 0x17, 0x10, 0xbc, 0xea, 0x94, 0xa4, 0x2f, 0x80, 0xfd, 0xc3, 0x1c, + 0x4c, 0xd8, 0x22, 0x09, 0x55, 0xdb, 0xb2, 0x32, 0x2e, 0x03, 0xa6, 0xed, 0xa1, 0x8c, 0x58, 0xdb, + 0x8a, 0x21, 0x06, 0xf9, 0xb7, 0x4e, 0x12, 0xe3, 0x2c, 0x4c, 0xa8, 0x26, 0x6b, 0x50, 0xa4, 0xfb, + 0x22, 0xe3, 0xf0, 0x8f, 0xd5, 0xed, 0xea, 0xf5, 0x41, 0x7b, 0x7a, 0x49, 0xc9, 0x04, 0x3e, 0x30, + 0xfc, 0x42, 0xdd, 0x56, 0x1c, 0x22, 0x5d, 0x55, 0x03, 0x90, 0x29, 0x59, 0xd6, 0x43, 0x24, 0x2c, + 0x24, 0xc4, 0xce, 0x71, 0x45, 0x41, 0xad, 0x40, 0x84, 0xa7, 0x41, 0x4e, 0x23, 0x13, 0xba, 0x62, + 0x10, 0x9e, 0x06, 0xf9, 0x0e, 0x2a, 0x0e, 0x69, 0x85, 0xd1, 0x68, 0x59, 0x1a, 0xb7, 0x9e, 0x39, + 0x42, 0xd7, 0x01, 0xee, 0xe0, 0x70, 0x94, 0x3c, 0x88, 0xbb, 0xc5, 0x89, 0xf3, 0xb8, 0xc5, 0xc9, + 0x33, 0x5d, 0x62, 0x0b, 0x0a, 0x5c, 0x3a, 0x5d, 0x99, 0xc8, 0x95, 0xef, 0x2c, 0x67, 0x3b, 0x88, + 0x13, 0x7e, 0x3b, 0xb0, 0x4e, 0x40, 0x43, 0x05, 0x4f, 0x1c, 0x28, 0x7a, 0x2a, 0x60, 0x53, 0xb9, + 0x60, 0xb6, 0x92, 0x6d, 0x3a, 0xea, 0x0b, 0xd6, 0x47, 0x48, 0x45, 0xad, 0x84, 0x7c, 0x04, 0xa3, + 0x4d, 0xda, 0x52, 0x59, 0xe1, 0x37, 0x32, 0xdf, 0x43, 0x87, 0x6a, 0xe4, 0xd3, 0xa4, 0x95, 0xa5, + 0x7b, 0x28, 0x50, 0xc9, 0x01, 0x8c, 0xf3, 0xc0, 0x07, 0x57, 0x66, 0x86, 0x78, 0xf1, 0x93, 0xf2, + 0xe3, 0x41, 0x9e, 0xa0, 0x88, 0x18, 0x6a, 0x20, 0xab, 0x30, 0x7e, 0xe8, 0x74, 0x7a, 0x5d, 0x95, + 0x4e, 
0x96, 0xef, 0xcc, 0x0f, 0x9a, 0xed, 0xc7, 0x52, 0x24, 0x72, 0x02, 0xc1, 0x37, 0xc7, 0xb0, + 0x2d, 0xf9, 0xed, 0x1c, 0x4c, 0x89, 0xad, 0xa3, 0xd7, 0x01, 0xaf, 0x90, 0x21, 0x56, 0xea, 0x23, + 0xce, 0xbc, 0x68, 0x85, 0x5d, 0x55, 0x6a, 0xa7, 0xd6, 0x13, 0x1a, 0x30, 0xa5, 0x91, 0xb8, 0x50, + 0xe4, 0x56, 0x93, 0x99, 0xd4, 0xe3, 0x95, 0xcb, 0x17, 0xa6, 0x3d, 0x8a, 0xff, 0x14, 0x36, 0x6a, + 0x2d, 0xe4, 0x77, 0xe4, 0x2b, 0x2d, 0xf5, 0x4e, 0x51, 0xbd, 0x1d, 0xbd, 0x72, 0x91, 0x6f, 0x47, + 0x2f, 0x07, 0x4f, 0xb4, 0x12, 0x1a, 0x30, 0xad, 0x92, 0x6c, 0xc3, 0x9c, 0x48, 0x3f, 0x0e, 0xd9, + 0x0a, 0xa3, 0xcd, 0x8e, 0x65, 0xb3, 0x06, 0x33, 0x1d, 0xbb, 0xc9, 0x2b, 0x73, 0xb2, 0xa8, 0xfe, + 0xda, 0xe9, 0x49, 0x75, 0x6e, 0x69, 0x90, 0x00, 0x0e, 0x6e, 0x47, 0x3e, 0x81, 0x49, 0x2f, 0x1e, + 0x85, 0x57, 0xae, 0x0e, 0x71, 0xa3, 0x9b, 0x88, 0xe7, 0x83, 0x72, 0x45, 0x82, 0x84, 0x49, 0x5d, + 0xe4, 0x36, 0x94, 0x5d, 0xe5, 0xa9, 0x2c, 0xde, 0xad, 0x5c, 0x93, 0x63, 0x90, 0x27, 0xea, 0x4e, + 0x44, 0xc6, 0xb8, 0x0c, 0x79, 0x04, 0x65, 0xdf, 0xe9, 0x30, 0x4f, 0x55, 0x9f, 0x2b, 0x72, 0xf2, + 0x17, 0x06, 0xad, 0xe4, 0x5d, 0x2d, 0x16, 0xd5, 0x36, 0x23, 0x1a, 0xc7, 0x38, 0x8e, 0x48, 0xe6, + 0xb8, 0xd9, 0x66, 0xcd, 0x5e, 0x87, 0x79, 0x32, 0x73, 0x7d, 0x2d, 0x99, 0xcc, 0x35, 0xe2, 0x4c, + 0x4c, 0xca, 0x8a, 0xf4, 0xcc, 0xf5, 0x2c, 0xc7, 0xb3, 0xfc, 0xe3, 0xe5, 0x0e, 0xe5, 0x5c, 0x02, + 0xcc, 0x4b, 0x00, 0x9d, 0x9e, 0xed, 0xa4, 0x05, 0xb0, 0xbf, 0x8d, 0x08, 0xdd, 0x43, 0x62, 0xe5, + 0x4b, 0x32, 0x09, 0x92, 0x6e, 0x29, 0x6c, 0x8b, 0x9a, 0x4b, 0x1e, 0x00, 0xe1, 0xc1, 0xa5, 0xe5, + 0x92, 0x29, 0x63, 0x47, 0xa9, 0xf3, 0x7a, 0xa2, 0x76, 0x49, 0x1a, 0x7d, 0x12, 0x38, 0xa0, 0x15, + 0xd9, 0x85, 0x72, 0xdb, 0xe1, 0xfe, 0x52, 0xc7, 0xa2, 0x9c, 0xf1, 0xca, 0xeb, 0xd2, 0xa4, 0x03, + 0x8f, 0x82, 0xfb, 0xa1, 0x58, 0x64, 0xd1, 0xfb, 0x51, 0x4b, 0x8c, 0xc3, 0x10, 0x26, 0x43, 0xfe, + 0x9e, 0x1c, 0xa0, 0x63, 0xfb, 0xec, 0xdb, 0x7e, 0x65, 0x41, 0x2e, 0xad, 0x9b, 0x83, 0x90, 0x77, + 0x9c, 0x66, 0x23, 0x29, 0x1d, 0x6c, 0x88, 0x14, 0x11, 0xd3, 0x98, 0xf3, 0x1f, 0xc2, 0x6c, 0x5f, + 0x00, 0xf1, 0x42, 0x95, 0xf5, 0xbf, 0xce, 0x41, 0x3c, 0x7e, 0xbb, 0xf0, 0x40, 0xf7, 0x1e, 0xcc, + 0xaa, 0x1f, 0x3b, 0x88, 0xd3, 0xa5, 0xd3, 0xf3, 0xc3, 0x0c, 0x2e, 0x96, 0xbb, 0x63, 0x5a, 0x00, + 0xfb, 0xdb, 0x18, 0x7f, 0x93, 0x83, 0xc9, 0x84, 0xbf, 0xba, 0xf0, 0xc4, 0x66, 0x0d, 0x48, 0xd7, + 0xf2, 0x3c, 0xc7, 0x0b, 0x9c, 0xfe, 0xa6, 0x58, 0x21, 0x5c, 0x3d, 0x7f, 0x94, 0x97, 0xf2, 0x9b, + 0x7d, 0x5c, 0x1c, 0xd0, 0xc2, 0xf8, 0x87, 0x1c, 0x44, 0xc5, 0x21, 0xfd, 0x12, 0x25, 0x77, 0xe6, + 0x4b, 0x94, 0xb7, 0xa0, 0xf8, 0x94, 0x3b, 0xf6, 0x4e, 0xf4, 0x5e, 0x45, 0x1b, 0xf4, 0x41, 0x63, + 0x7b, 0x4b, 0x4a, 0x6a, 0x09, 0x29, 0xfd, 0xf1, 0x9a, 0xd5, 0xf1, 0xfb, 0x5f, 0x75, 0x3c, 0xf8, + 0xd5, 0x80, 0x8e, 0x5a, 0x82, 0x2c, 0x42, 0x49, 0xd7, 0x23, 0x55, 0x46, 0xa4, 0x8d, 0xa0, 0x8b, + 0x71, 0x18, 0xc9, 0x18, 0xdf, 0x1f, 0x81, 0x62, 0x58, 0xcc, 0x23, 0xbf, 0x19, 0x8b, 0x0e, 0x03, + 0x03, 0xbf, 0x73, 0xbe, 0x2a, 0xf9, 0xf6, 0xde, 0x53, 0x66, 0xfa, 0x22, 0x04, 0x8c, 0xaa, 0x5f, + 0x11, 0x2d, 0x16, 0x12, 0x9a, 0x30, 0xc6, 0x5d, 0x66, 0x0e, 0xf5, 0xc3, 0x13, 0x5d, 0x7b, 0x74, + 0x99, 0x19, 0x19, 0x58, 0x7c, 0xa1, 0x04, 0x27, 0x07, 0x50, 0xe0, 0xf2, 0xe2, 0x48, 0x65, 0x5a, + 0xcb, 0x43, 0x96, 0x38, 0x05, 0x54, 0xfc, 0xa6, 0x54, 0x7c, 0xa3, 0x52, 0x61, 0x7c, 0x9e, 0x83, + 0x89, 0x50, 0xf4, 0xa1, 0xc5, 0x7d, 0xf2, 0xeb, 0x7d, 0x46, 0xac, 0x9d, 0xcf, 0x88, 0xa2, 0xb5, + 0x34, 0xa1, 0x9e, 0xe0, 0x90, 0x12, 0x33, 0xe0, 0x1e, 0xe4, 0x2d, 0xf9, 0x06, 0x74, 0x98, 0xf2, + 0x72, 0xd8, 0xdf, 0x28, 0x54, 
0x0e, 0xde, 0x80, 0x06, 0xd0, 0xc6, 0xdf, 0xcd, 0x46, 0x43, 0x12, + 0x66, 0x25, 0x36, 0x94, 0xc2, 0x0d, 0x1e, 0x96, 0x8d, 0xdf, 0x1f, 0x2a, 0x0b, 0x8a, 0x16, 0x65, + 0x48, 0xe1, 0x18, 0xa9, 0x20, 0x77, 0x00, 0x98, 0xf0, 0x6c, 0x41, 0x4d, 0x69, 0x24, 0x59, 0x55, + 0x5d, 0xd5, 0x1c, 0x8c, 0x49, 0xbd, 0xfa, 0x0c, 0x7b, 0xf0, 0x99, 0x34, 0x96, 0xe9, 0x4c, 0x8a, + 0x05, 0xab, 0xf9, 0x21, 0x82, 0xd5, 0x4f, 0xe0, 0x4a, 0xf0, 0xef, 0x72, 0x87, 0x5a, 0x5d, 0x6d, + 0x5a, 0xf5, 0xfa, 0xe2, 0xcb, 0x03, 0x4f, 0x22, 0xe6, 0x71, 0x8b, 0xfb, 0xcc, 0xf6, 0x1f, 0x47, + 0x2d, 0xeb, 0xd7, 0x95, 0x8a, 0x2b, 0x8f, 0x07, 0xc0, 0xe1, 0x40, 0x25, 0xe9, 0xe8, 0x66, 0xfc, + 0x1c, 0xd1, 0xcd, 0xf7, 0x72, 0x30, 0x47, 0x07, 0xfd, 0x58, 0x41, 0xe5, 0xb8, 0x0f, 0x86, 0x8a, + 0x35, 0x13, 0x88, 0x2a, 0x56, 0x1c, 0xc4, 0xc2, 0xc1, 0x7d, 0x20, 0x6f, 0x46, 0xe9, 0x4a, 0x49, + 0x1e, 0x0a, 0x83, 0x13, 0x8d, 0xef, 0xa4, 0xcb, 0x04, 0x20, 0xad, 0xdd, 0x18, 0xda, 0xb7, 0x5d, + 0x40, 0xa9, 0xa0, 0x3c, 0x44, 0xa9, 0x20, 0x15, 0x7a, 0x4e, 0x5c, 0x50, 0xe8, 0x69, 0xc3, 0x8c, + 0xd5, 0xa5, 0x2d, 0xb6, 0xd3, 0xeb, 0x74, 0x82, 0x22, 0x26, 0xaf, 0x4c, 0x4a, 0xec, 0x81, 0x4f, + 0xe6, 0x44, 0x2a, 0xd0, 0x09, 0x8e, 0x12, 0x5d, 0xd7, 0x8f, 0xea, 0xee, 0xeb, 0x29, 0x24, 0xec, + 0xc3, 0x16, 0xcb, 0x52, 0xc4, 0x69, 0x5b, 0xcc, 0x17, 0xd6, 0x96, 0x59, 0xb4, 0xfa, 0x51, 0xd6, + 0xfd, 0x88, 0x8c, 0x71, 0x19, 0xb2, 0x01, 0xa5, 0xa6, 0xcd, 0x77, 0x9c, 0x8e, 0x65, 0x1e, 0xcb, + 0x54, 0xb8, 0x54, 0x7f, 0x5b, 0xb8, 0x81, 0x95, 0xad, 0x46, 0x40, 0x7c, 0x76, 0x52, 0xbd, 0xde, + 0xff, 0xab, 0xd3, 0x9a, 0xe6, 0x63, 0xd4, 0x9e, 0x6c, 0x4a, 0x30, 0xf5, 0x7e, 0x34, 0x48, 0x7b, + 0x6f, 0x9c, 0x11, 0x12, 0xae, 0x6c, 0x85, 0xcf, 0x5d, 0x27, 0x95, 0x3a, 0xf5, 0x2a, 0x34, 0x42, + 0x20, 0x37, 0xa1, 0xe0, 0xd8, 0xab, 0xdf, 0xb6, 0xfc, 0xca, 0x6c, 0xb2, 0xde, 0xb9, 0x2d, 0xa9, + 0xa8, 0xb8, 0xe4, 0x11, 0x5c, 0xf3, 0xfd, 0x8e, 0x4a, 0x7b, 0x96, 0xf6, 0x7d, 0xe6, 0x85, 0x77, + 0xd7, 0x15, 0x22, 0x43, 0xed, 0x2f, 0x9d, 0x9e, 0x54, 0xaf, 0xed, 0xee, 0x3e, 0x1c, 0x24, 0x82, + 0x67, 0xb5, 0x3d, 0x3b, 0x21, 0xbb, 0x9c, 0x31, 0x21, 0x8b, 0xe7, 0x00, 0x57, 0x7e, 0x6c, 0x0e, + 0xd0, 0x97, 0xb3, 0xcc, 0xbd, 0x40, 0xce, 0xf2, 0x91, 0xbc, 0xa1, 0xbf, 0xb7, 0x2c, 0xf3, 0x94, + 0xf2, 0x9d, 0xf7, 0xb2, 0x95, 0x99, 0x04, 0x42, 0x70, 0x51, 0x23, 0xff, 0xc5, 0x00, 0x93, 0xec, + 0xc0, 0x15, 0xd7, 0x69, 0xf6, 0xa5, 0x3c, 0x32, 0xb7, 0x2c, 0x45, 0xbe, 0x74, 0x67, 0x80, 0x0c, + 0x0e, 0x6c, 0x29, 0x7d, 0x69, 0x44, 0x97, 0x99, 0x62, 0x5e, 0xf9, 0xd2, 0x88, 0x8c, 0x71, 0x99, + 0x74, 0x5a, 0x53, 0x79, 0x69, 0x69, 0xcd, 0x6b, 0xff, 0x1f, 0xd3, 0x9a, 0xbf, 0x2f, 0xc0, 0x54, + 0x32, 0x60, 0x8b, 0x9e, 0xb8, 0xe4, 0xce, 0xfb, 0xc4, 0x25, 0xf1, 0x06, 0x65, 0xe4, 0xa5, 0xbe, + 0x41, 0x19, 0xbd, 0xf0, 0x37, 0x28, 0xb1, 0xb7, 0x36, 0x63, 0xcf, 0x79, 0x6b, 0xb3, 0x04, 0xd3, + 0xa6, 0xd3, 0x75, 0xe5, 0xb3, 0x71, 0xf5, 0xe2, 0x22, 0xb8, 0x15, 0xd5, 0xf7, 0x4e, 0xcb, 0x49, + 0x36, 0xa6, 0xe5, 0x09, 0x87, 0xbc, 0x2d, 0x1b, 0x16, 0x86, 0x78, 0x0f, 0x97, 0x9c, 0x30, 0x79, + 0xdc, 0xa9, 0x27, 0x69, 0x3a, 0x2e, 0x0d, 0xd4, 0x07, 0xba, 0x88, 0x07, 0x57, 0xdd, 0x41, 0xe1, + 0x0a, 0x57, 0x95, 0xf9, 0x1f, 0x17, 0x34, 0x2d, 0x28, 0xc4, 0xab, 0x03, 0x03, 0x1e, 0x8e, 0x67, + 0x20, 0xc7, 0x9f, 0xca, 0x14, 0x5f, 0xd6, 0x53, 0x99, 0xf9, 0xe3, 0xe0, 0xb5, 0xdb, 0x99, 0x0f, + 0xe5, 0x1e, 0x25, 0x1f, 0xaf, 0x7e, 0x98, 0xf1, 0xf7, 0xfa, 0xe1, 0x7b, 0xba, 0xf8, 0xc6, 0xf9, + 0x8f, 0x7c, 0x2c, 0xd6, 0xf7, 0x99, 0xfb, 0xb3, 0x9b, 0xaf, 0x4c, 0x37, 0x5f, 0x89, 0x9f, 0xe7, + 0xe5, 0x5f, 0xe1, 0xcf, 0xf3, 0x0a, 0x19, 0x7e, 0x9e, 
0x37, 0xfe, 0x2a, 0x7f, 0x9e, 0x57, 0x3c, + 0xe7, 0xcf, 0xf3, 0x4a, 0x2f, 0xff, 0xe7, 0x79, 0x5f, 0xe4, 0x60, 0x26, 0x5c, 0xeb, 0xfa, 0xa6, + 0xf7, 0xe5, 0xd7, 0x3c, 0x0e, 0x12, 0x35, 0x8f, 0xf5, 0xa1, 0x5c, 0x65, 0xd8, 0xed, 0xb3, 0x6a, + 0x1f, 0xc6, 0x0f, 0x73, 0x70, 0x25, 0x2d, 0xfc, 0x0a, 0xca, 0x12, 0x4f, 0x93, 0x65, 0x89, 0xd5, + 0x0b, 0x19, 0xe4, 0x19, 0xe5, 0x89, 0xff, 0x19, 0x30, 0xc4, 0xff, 0x93, 0x32, 0x45, 0xc2, 0xb5, + 0x8d, 0xbc, 0x7c, 0xd7, 0x56, 0xaf, 0x7d, 0xf6, 0xc5, 0xc2, 0xa5, 0xcf, 0xbf, 0x58, 0xb8, 0xf4, + 0x83, 0x2f, 0x16, 0x2e, 0x7d, 0x7a, 0xba, 0x90, 0xfb, 0xec, 0x74, 0x21, 0xf7, 0xf9, 0xe9, 0x42, + 0xee, 0x07, 0xa7, 0x0b, 0xb9, 0x1f, 0x9e, 0x2e, 0xe4, 0xfe, 0xf8, 0x47, 0x0b, 0x97, 0x7e, 0xad, + 0x18, 0xe2, 0xfe, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x3e, 0x91, 0x31, 0xc4, 0x47, 0x00, + 0x00, +} + +func (m *ArchiveStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArchiveStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Tar != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Tar.Size())) + n1, err := m.Tar.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.None != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.None.Size())) + n2, err := m.None.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *Arguments) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Arguments) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Parameters) > 0 { + for _, msg := range m.Parameters { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Artifacts) > 0 { + for _, msg := range m.Artifacts { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Artifact) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Artifact) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + if m.Mode != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + } + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.From))) + i += copy(dAtA[i:], m.From) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ArtifactLocation.Size())) + n3, err := m.ArtifactLocation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GlobalName))) + i += copy(dAtA[i:], m.GlobalName) + if m.Archive != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Archive.Size())) + n4, err := 
m.Archive.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + dAtA[i] = 0x40 + i++ + if m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *ArtifactLocation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactLocation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ArchiveLogs != nil { + dAtA[i] = 0x8 + i++ + if *m.ArchiveLogs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.S3 != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.S3.Size())) + n5, err := m.S3.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.Git != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Git.Size())) + n6, err := m.Git.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.HTTP != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) + n7, err := m.HTTP.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Artifactory != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Artifactory.Size())) + n8, err := m.Artifactory.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.HDFS != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HDFS.Size())) + n9, err := m.HDFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.Raw != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Raw.Size())) + n10, err := m.Raw.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} + +func (m *ArtifactRepositoryRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactRepositoryRef) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConfigMap))) + i += copy(dAtA[i:], m.ConfigMap) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + return i, nil +} + +func (m *ArtifactoryArtifact) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactoryArtifact) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i += copy(dAtA[i:], m.URL) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ArtifactoryAuth.Size())) + n11, err := m.ArtifactoryAuth.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + return i, nil +} + +func (m *ArtifactoryAuth) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtifactoryAuth) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.UsernameSecret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UsernameSecret.Size())) + n12, err := m.UsernameSecret.MarshalTo(dAtA[i:]) + 
if err != nil { + return 0, err + } + i += n12 + } + if m.PasswordSecret != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PasswordSecret.Size())) + n13, err := m.PasswordSecret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} + +func (m *ContinueOn) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContinueOn) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Error { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x10 + i++ + if m.Failed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *DAGTask) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DAGTask) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Template))) + i += copy(dAtA[i:], m.Template) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Arguments.Size())) + n14, err := m.Arguments.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + if m.TemplateRef != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateRef.Size())) + n15, err := m.TemplateRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if len(m.Dependencies) > 0 { + for _, s := range m.Dependencies { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.WithItems) > 0 { + for _, msg := range m.WithItems { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WithParam))) + i += copy(dAtA[i:], m.WithParam) + if m.WithSequence != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.WithSequence.Size())) + n16, err := m.WithSequence.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.When))) + i += copy(dAtA[i:], m.When) + if m.ContinueOn != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ContinueOn.Size())) + n17, err := m.ContinueOn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *DAGTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DAGTemplate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Target))) + i += copy(dAtA[i:], m.Target) + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if 
m.FailFast != nil { + dAtA[i] = 0x18 + i++ + if *m.FailFast { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *GitArtifact) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitArtifact) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repo))) + i += copy(dAtA[i:], m.Repo) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) + i += copy(dAtA[i:], m.Revision) + if m.Depth != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Depth)) + } + if len(m.Fetch) > 0 { + for _, s := range m.Fetch { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.UsernameSecret != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UsernameSecret.Size())) + n18, err := m.UsernameSecret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.PasswordSecret != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PasswordSecret.Size())) + n19, err := m.PasswordSecret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.SSHPrivateKeySecret != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SSHPrivateKeySecret.Size())) + n20, err := m.SSHPrivateKeySecret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + dAtA[i] = 0x40 + i++ + if m.InsecureIgnoreHostKey { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *HDFSArtifact) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HDFSArtifact) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HDFSConfig.Size())) + n21, err := m.HDFSConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x18 + i++ + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *HDFSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HDFSConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HDFSKrbConfig.Size())) + n22, err := m.HDFSKrbConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HDFSUser))) + i += copy(dAtA[i:], m.HDFSUser) + return i, nil +} + +func (m *HDFSKrbConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if 
err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HDFSKrbConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.KrbCCacheSecret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.KrbCCacheSecret.Size())) + n23, err := m.KrbCCacheSecret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.KrbKeytabSecret != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.KrbKeytabSecret.Size())) + n24, err := m.KrbKeytabSecret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbUsername))) + i += copy(dAtA[i:], m.KrbUsername) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbRealm))) + i += copy(dAtA[i:], m.KrbRealm) + if m.KrbConfigConfigMap != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.KrbConfigConfigMap.Size())) + n25, err := m.KrbConfigConfigMap.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbServicePrincipalName))) + i += copy(dAtA[i:], m.KrbServicePrincipalName) + return i, nil +} + +func (m *HTTPArtifact) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPArtifact) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i += copy(dAtA[i:], m.URL) + return i, nil +} + +func (m *Inputs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Inputs) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Parameters) > 0 { + for _, msg := range m.Parameters { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Artifacts) > 0 { + for _, msg := range m.Artifacts { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Item) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Item) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NumVal))) + i += copy(dAtA[i:], m.NumVal) + dAtA[i] = 0x18 + i++ + if m.BoolVal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) + i += copy(dAtA[i:], m.StrVal) + if len(m.MapVal) > 0 { + keysForMapVal := make([]string, 0, len(m.MapVal)) + for k := range m.MapVal { + keysForMapVal = append(keysForMapVal, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMapVal) + for _, k := range keysForMapVal { + dAtA[i] = 0x2a + i++ + v := m.MapVal[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() 
+ msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n26, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + } + if len(m.ListVal) > 0 { + for _, msg := range m.ListVal { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ItemValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ItemValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NumVal))) + i += copy(dAtA[i:], m.NumVal) + dAtA[i] = 0x18 + i++ + if m.BoolVal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) + i += copy(dAtA[i:], m.StrVal) + if len(m.MapVal) > 0 { + keysForMapVal := make([]string, 0, len(m.MapVal)) + for k := range m.MapVal { + keysForMapVal = append(keysForMapVal, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMapVal) + for _, k := range keysForMapVal { + dAtA[i] = 0x2a + i++ + v := m.MapVal[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.ListVal) > 0 { + for _, b := range m.ListVal { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(b))) + i += copy(dAtA[i:], b) + } + } + return i, nil +} + +func (m *Metadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for _, k := range keysForAnnotations { + dAtA[i] = 0xa + i++ + v := m.Annotations[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for _, k := range keysForLabels { + dAtA[i] = 0x12 + i++ + v := 
m.Labels[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *NodeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) + i += copy(dAtA[i:], m.DisplayName) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TemplateName))) + i += copy(dAtA[i:], m.TemplateName) + if m.TemplateRef != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateRef.Size())) + n27, err := m.TemplateRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BoundaryID))) + i += copy(dAtA[i:], m.BoundaryID) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) + n28, err := m.StartedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FinishedAt.Size())) + n29, err := m.FinishedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) + i += copy(dAtA[i:], m.PodIP) + if m.Daemoned != nil { + dAtA[i] = 0x68 + i++ + if *m.Daemoned { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Inputs != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Inputs.Size())) + n30, err := m.Inputs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.Outputs != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Outputs.Size())) + n31, err := m.Outputs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if len(m.Children) > 0 { + for _, s := range m.Children { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.OutboundNodes) > 0 { + for _, s := range m.OutboundNodes { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *NoneStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) 
+ if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NoneStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *Outputs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Outputs) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Parameters) > 0 { + for _, msg := range m.Parameters { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Artifacts) > 0 { + for _, msg := range m.Artifacts { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Result != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Result))) + i += copy(dAtA[i:], *m.Result) + } + return i, nil +} + +func (m *ParallelSteps) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParallelSteps) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Steps) > 0 { + for _, msg := range m.Steps { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Parameter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Parameter) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Default != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Default))) + i += copy(dAtA[i:], *m.Default) + } + if m.Value != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i += copy(dAtA[i:], *m.Value) + } + if m.ValueFrom != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ValueFrom.Size())) + n32, err := m.ValueFrom.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GlobalName))) + i += copy(dAtA[i:], m.GlobalName) + return i, nil +} + +func (m *PodGC) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodGC) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Strategy))) + i += copy(dAtA[i:], m.Strategy) + return i, nil +} + +func (m *RawArtifact) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawArtifact) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + return i, nil +} + +func (m *ResourceTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceTemplate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i += copy(dAtA[i:], m.Action) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MergeStrategy))) + i += copy(dAtA[i:], m.MergeStrategy) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manifest))) + i += copy(dAtA[i:], m.Manifest) + dAtA[i] = 0x20 + i++ + if m.SetOwnerReference { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SuccessCondition))) + i += copy(dAtA[i:], m.SuccessCondition) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FailureCondition))) + i += copy(dAtA[i:], m.FailureCondition) + return i, nil +} + +func (m *RetryStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetryStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Limit != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Limit)) + } + return i, nil +} + +func (m *S3Artifact) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *S3Artifact) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.S3Bucket.Size())) + n33, err := m.S3Bucket.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + return i, nil +} + +func (m *S3Bucket) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *S3Bucket) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Endpoint))) + i += copy(dAtA[i:], m.Endpoint) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Bucket))) + i += copy(dAtA[i:], m.Bucket) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) + i += copy(dAtA[i:], m.Region) + if m.Insecure != nil { + dAtA[i] = 0x20 + i++ + if *m.Insecure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AccessKeySecret.Size())) + n34, err := m.AccessKeySecret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretKeySecret.Size())) + n35, err := m.SecretKeySecret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + return i, nil +} + +func (m *ScriptTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *ScriptTemplate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Container.Size())) + n36, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Source))) + i += copy(dAtA[i:], m.Source) + return i, nil +} + +func (m *Sequence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sequence) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Count))) + i += copy(dAtA[i:], m.Count) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Start))) + i += copy(dAtA[i:], m.Start) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.End))) + i += copy(dAtA[i:], m.End) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Format))) + i += copy(dAtA[i:], m.Format) + return i, nil +} + +func (m *SuspendTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SuspendTemplate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *TarStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TarStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *Template) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Template) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Template))) + i += copy(dAtA[i:], m.Template) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Arguments.Size())) + n37, err := m.Arguments.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + if m.TemplateRef != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateRef.Size())) + n38, err := m.TemplateRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Inputs.Size())) + n39, err := m.Inputs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Outputs.Size())) + n40, err := m.Outputs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for _, k := range keysForNodeSelector { + dAtA[i] = 0x3a + i++ + v := m.NodeSelector[string(k)] + mapSize := 1 + len(k) + 
sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.Affinity != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) + n41, err := m.Affinity.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) + n42, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + if m.Daemon != nil { + dAtA[i] = 0x50 + i++ + if *m.Daemon { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Steps) > 0 { + for _, msg := range m.Steps { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Container != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Container.Size())) + n43, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + if m.Script != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Script.Size())) + n44, err := m.Script.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + if m.Resource != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) + n45, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + if m.DAG != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DAG.Size())) + n46, err := m.DAG.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + } + if m.Suspend != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Suspend.Size())) + n47, err := m.Suspend.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + } + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.InitContainers) > 0 { + for _, msg := range m.InitContainers { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Sidecars) > 0 { + for _, msg := range m.Sidecars { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.ArchiveLocation != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ArchiveLocation.Size())) + n48, err := m.ArchiveLocation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + } + if m.ActiveDeadlineSeconds != nil { + dAtA[i] = 0xa8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + } + if m.RetryStrategy != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RetryStrategy.Size())) + n49, err := m.RetryStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + } 
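// NOTE (editorial): the literal bytes written throughout these MarshalTo
// bodies (0xa, 0x12, 0x5a, the pair 0xb2 0x1, ...) are protobuf field keys:
// key = fieldNumber<<3 | wireType, itself varint-encoded. Fields 1-15 fit in
// a single byte; field 16 and above need two, which is why the later
// Template fields above are written as byte pairs like 0xb2, 0x1. A quick
// standalone check of that arithmetic:
package main

import "fmt"

func fieldKey(field, wire uint64) []byte {
	v := field<<3 | wire
	var out []byte
	for v >= 1<<7 {
		out = append(out, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(out, byte(v))
}

func main() {
	fmt.Printf("% x\n", fieldKey(1, 2))  // 0a    -> Template.Name
	fmt.Printf("% x\n", fieldKey(11, 2)) // 5a    -> Template.Steps
	fmt.Printf("% x\n", fieldKey(22, 2)) // b2 01 -> Template.RetryStrategy
}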
+ if m.Parallelism != nil { + dAtA[i] = 0xb8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) + } + if len(m.Tolerations) > 0 { + for _, msg := range m.Tolerations { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0xca + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i += copy(dAtA[i:], m.SchedulerName) + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) + i += copy(dAtA[i:], m.PriorityClassName) + if m.Priority != nil { + dAtA[i] = 0xd8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + } + dAtA[i] = 0xe2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i += copy(dAtA[i:], m.ServiceAccountName) + if len(m.HostAliases) > 0 { + for _, msg := range m.HostAliases { + dAtA[i] = 0xea + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.SecurityContext != nil { + dAtA[i] = 0xf2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) + n50, err := m.SecurityContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + return i, nil +} + +func (m *TemplateRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateRef) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Template))) + i += copy(dAtA[i:], m.Template) + dAtA[i] = 0x18 + i++ + if m.RuntimeResolution { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + +func (m *UserContainer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserContainer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Container.Size())) + n51, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 + if m.MirrorVolumeMounts != nil { + dAtA[i] = 0x10 + i++ + if *m.MirrorVolumeMounts { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ValueFrom) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValueFrom) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JSONPath))) + i += copy(dAtA[i:], m.JSONPath) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JQFilter))) + i += copy(dAtA[i:], m.JQFilter) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Parameter))) + i += copy(dAtA[i:], m.Parameter) + return i, nil +} + +func (m *Workflow) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Workflow) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n52, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n53, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n54, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n54 + return i, nil +} + +func (m *WorkflowList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n55, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n55 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *WorkflowSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Templates) > 0 { + for _, msg := range m.Templates { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Entrypoint))) + i += copy(dAtA[i:], m.Entrypoint) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Arguments.Size())) + n56, err := m.Arguments.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n56 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i += copy(dAtA[i:], m.ServiceAccountName) + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.VolumeClaimTemplates) > 0 { + for _, msg := range m.VolumeClaimTemplates { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Parallelism != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) + } + if m.ArtifactRepositoryRef != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ArtifactRepositoryRef.Size())) + n57, err := m.ArtifactRepositoryRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n57 + } + if m.Suspend != nil { + dAtA[i] = 0x48 
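// NOTE (editorial): the 0x48 just written is the key for WorkflowSpec field 9
// (Suspend), wire type 0 (varint). Optional scalars are pointer-typed in the
// Go structs, so the whole field is skipped when nil, and a present bool is a
// single 0-or-1 payload byte, exactly as the next few generated lines do.
// Hedged sketch of the same pattern as an append-style helper:
package main

import "fmt"

// appendOptionalBool emits nothing for nil, else key + one payload byte.
// The single-byte key only holds for field numbers 1-15.
func appendOptionalBool(dAtA []byte, field byte, b *bool) []byte {
	if b == nil {
		return dAtA
	}
	dAtA = append(dAtA, field<<3|0) // wire type 0 = varint
	if *b {
		return append(dAtA, 1)
	}
	return append(dAtA, 0)
}

func main() {
	t := true
	fmt.Printf("% x\n", appendOptionalBool(nil, 9, &t))       // 48 01
	fmt.Printf("%d\n", len(appendOptionalBool(nil, 9, nil))) // 0
}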
+ i++ + if *m.Suspend { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for _, k := range keysForNodeSelector { + dAtA[i] = 0x52 + i++ + v := m.NodeSelector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.Affinity != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) + n58, err := m.Affinity.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 + } + if len(m.Tolerations) > 0 { + for _, msg := range m.Tolerations { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.ImagePullSecrets) > 0 { + for _, msg := range m.ImagePullSecrets { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.HostNetwork != nil { + dAtA[i] = 0x70 + i++ + if *m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.DNSPolicy != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DNSPolicy))) + i += copy(dAtA[i:], *m.DNSPolicy) + } + if m.DNSConfig != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size())) + n59, err := m.DNSConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 + } + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OnExit))) + i += copy(dAtA[i:], m.OnExit) + if m.TTLSecondsAfterFinished != nil { + dAtA[i] = 0x90 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TTLSecondsAfterFinished)) + } + if m.ActiveDeadlineSeconds != nil { + dAtA[i] = 0x98 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + } + if m.Priority != nil { + dAtA[i] = 0xa0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + } + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i += copy(dAtA[i:], m.SchedulerName) + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodPriorityClassName))) + i += copy(dAtA[i:], m.PodPriorityClassName) + if m.PodPriority != nil { + dAtA[i] = 0xb8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.PodPriority)) + } + if len(m.HostAliases) > 0 { + for _, msg := range m.HostAliases { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.SecurityContext != nil { + dAtA[i] = 0xca + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) + n60, err := m.SecurityContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + 
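// NOTE (editorial): map fields like the NodeSelector block above are encoded
// as a repeated nested entry message (key = field 1, value = field 2), with
// the Go map keys sorted via gogo's sortkeys first so the wire bytes are
// deterministic. Hand-rolled sketch of one string->string map field, under
// the stated assumption that all strings are shorter than 128 bytes so every
// length prefix is a single varint byte:
package main

import (
	"fmt"
	"sort"
)

func appendStringMap(dAtA []byte, field byte, m map[string]string) []byte {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys) // same effect as github_com_gogo_protobuf_sortkeys.Strings
	for _, k := range keys {
		v := m[k]
		entry := append([]byte{0xa, byte(len(k))}, k...) // entry field 1: key
		entry = append(entry, 0x12, byte(len(v)))        // entry field 2: value
		entry = append(entry, v...)
		dAtA = append(dAtA, field<<3|2, byte(len(entry)))
		dAtA = append(dAtA, entry...)
	}
	return dAtA
}

func main() {
	b := appendStringMap(nil, 10, map[string]string{"disktype": "ssd"})
	fmt.Printf("% x\n", b) // 52 0f 0a 08 ... 12 03 ...
}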
i += n60 + } + if m.PodGC != nil { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodGC.Size())) + n61, err := m.PodGC.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n61 + } + return i, nil +} + +func (m *WorkflowStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StartedAt.Size())) + n62, err := m.StartedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n62 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FinishedAt.Size())) + n63, err := m.FinishedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n63 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CompressedNodes))) + i += copy(dAtA[i:], m.CompressedNodes) + if len(m.Nodes) > 0 { + keysForNodes := make([]string, 0, len(m.Nodes)) + for k := range m.Nodes { + keysForNodes = append(keysForNodes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodes) + for _, k := range keysForNodes { + dAtA[i] = 0x32 + i++ + v := m.Nodes[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n64, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n64 + } + } + if len(m.PersistentVolumeClaims) > 0 { + for _, msg := range m.PersistentVolumeClaims { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Outputs != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Outputs.Size())) + n65, err := m.Outputs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n65 + } + return i, nil +} + +func (m *WorkflowStep) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowStep) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Template))) + i += copy(dAtA[i:], m.Template) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Arguments.Size())) + n66, err := m.Arguments.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n66 + if m.TemplateRef != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateRef.Size())) + n67, err := m.TemplateRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n67 + } + if len(m.WithItems) > 0 
{ + for _, msg := range m.WithItems { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WithParam))) + i += copy(dAtA[i:], m.WithParam) + if m.WithSequence != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.WithSequence.Size())) + n68, err := m.WithSequence.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n68 + } + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.When))) + i += copy(dAtA[i:], m.When) + if m.ContinueOn != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ContinueOn.Size())) + n69, err := m.ContinueOn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n69 + } + return i, nil +} + +func (m *WorkflowTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTemplate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n70, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n70 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n71, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n71 + return i, nil +} + +func (m *WorkflowTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTemplateList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n72, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n72 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *WorkflowTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Templates) > 0 { + for _, msg := range m.Templates { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Arguments.Size())) + n73, err := m.Arguments.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n73 + return i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ArchiveStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Tar != nil { + l = m.Tar.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.None != nil { + l = m.None.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *Arguments) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Parameters) > 0 { + for _, e := range m.Parameters { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Artifacts) > 0 { + for _, e := range m.Artifacts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Artifact) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) + } + l = len(m.From) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ArtifactLocation.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.GlobalName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Archive != nil { + l = m.Archive.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *ArtifactLocation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ArchiveLogs != nil { + n += 2 + } + if m.S3 != nil { + l = m.S3.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Artifactory != nil { + l = m.Artifactory.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HDFS != nil { + l = m.HDFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Raw != nil { + l = m.Raw.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ArtifactRepositoryRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConfigMap) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ArtifactoryArtifact) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ArtifactoryAuth.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ArtifactoryAuth) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UsernameSecret != nil { + l = m.UsernameSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PasswordSecret != nil { + l = m.PasswordSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ContinueOn) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + return n +} + +func (m *DAGTask) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Template) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Arguments.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.TemplateRef != nil { + l = m.TemplateRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Dependencies) > 0 { + for _, s := range m.Dependencies { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.WithItems) > 0 { + for _, e := range m.WithItems { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.WithParam) + n += 1 + l + sovGenerated(uint64(l)) + if m.WithSequence != nil { + l = m.WithSequence.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.When) + n += 1 + l + sovGenerated(uint64(l)) + if m.ContinueOn != nil { + l = m.ContinueOn.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DAGTemplate) Size() (n int) { + if m == 
nil { + return 0 + } + var l int + _ = l + l = len(m.Target) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailFast != nil { + n += 2 + } + return n +} + +func (m *GitArtifact) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Repo) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Revision) + n += 1 + l + sovGenerated(uint64(l)) + if m.Depth != nil { + n += 1 + sovGenerated(uint64(*m.Depth)) + } + if len(m.Fetch) > 0 { + for _, s := range m.Fetch { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.UsernameSecret != nil { + l = m.UsernameSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PasswordSecret != nil { + l = m.PasswordSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SSHPrivateKeySecret != nil { + l = m.SSHPrivateKeySecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *HDFSArtifact) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.HDFSConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *HDFSConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.HDFSKrbConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.HDFSUser) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HDFSKrbConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.KrbCCacheSecret != nil { + l = m.KrbCCacheSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.KrbKeytabSecret != nil { + l = m.KrbKeytabSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.KrbUsername) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KrbRealm) + n += 1 + l + sovGenerated(uint64(l)) + if m.KrbConfigConfigMap != nil { + l = m.KrbConfigConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.KrbServicePrincipalName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HTTPArtifact) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Inputs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Parameters) > 0 { + for _, e := range m.Parameters { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Artifacts) > 0 { + for _, e := range m.Artifacts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Item) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Type)) + l = len(m.NumVal) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.StrVal) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.MapVal) > 0 { + for k, v := range m.MapVal { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.ListVal) > 0 { + for _, e := range m.ListVal { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ItemValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Type)) + l = len(m.NumVal) + n += 1 + l + 
sovGenerated(uint64(l)) + n += 2 + l = len(m.StrVal) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.MapVal) > 0 { + for k, v := range m.MapVal { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.ListVal) > 0 { + for _, b := range m.ListVal { + l = len(b) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Metadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DisplayName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TemplateName) + n += 1 + l + sovGenerated(uint64(l)) + if m.TemplateRef != nil { + l = m.TemplateRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.BoundaryID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FinishedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodIP) + n += 1 + l + sovGenerated(uint64(l)) + if m.Daemoned != nil { + n += 2 + } + if m.Inputs != nil { + l = m.Inputs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Outputs != nil { + l = m.Outputs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Children) > 0 { + for _, s := range m.Children { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.OutboundNodes) > 0 { + for _, s := range m.OutboundNodes { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NoneStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *Outputs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Parameters) > 0 { + for _, e := range m.Parameters { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Artifacts) > 0 { + for _, e := range m.Artifacts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Result != nil { + l = len(*m.Result) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ParallelSteps) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Steps) > 0 { + for _, e := range m.Steps { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Parameter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Default != nil { + l = len(*m.Default) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ValueFrom != nil { + l = m.ValueFrom.Size() 
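// NOTE (editorial): every Size() method in this stretch mirrors its
// MarshalTo byte for byte: the recurring `n += 1 + l + sovGenerated(uint64(l))`
// is one key byte, plus the varint-encoded length prefix, plus the payload
// itself, and sovGenerated (defined further below) is simply "width of x as
// a varint". Standalone sketch:
package main

import "fmt"

// sov reports how many bytes varint-encoding x takes (1..10).
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// sizeStringField is the generic shape of the string-field lines above,
// assuming a single-byte field key.
func sizeStringField(s string) int {
	l := len(s)
	return 1 + sov(uint64(l)) + l
}

func main() {
	fmt.Println(sizeStringField("whalesay"))    // 1 + 1 + 8 = 10
	fmt.Println(sov(127), sov(128), sov(1<<14)) // 1 2 3
}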
+ n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.GlobalName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodGC) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Strategy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RawArtifact) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MergeStrategy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Manifest) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.SuccessCondition) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FailureCondition) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RetryStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Limit != nil { + n += 1 + sovGenerated(uint64(*m.Limit)) + } + return n +} + +func (m *S3Artifact) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.S3Bucket.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *S3Bucket) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Endpoint) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Bucket) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Region) + n += 1 + l + sovGenerated(uint64(l)) + if m.Insecure != nil { + n += 2 + } + l = m.AccessKeySecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SecretKeySecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScriptTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Container.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Source) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Sequence) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Count) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Start) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.End) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Format) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SuspendTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *TarStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *Template) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Template) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Arguments.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.TemplateRef != nil { + l = m.TemplateRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Inputs.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Outputs.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Affinity != nil { + l = m.Affinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Daemon != nil { + n += 2 + } + if len(m.Steps) > 0 { + for _, e := range m.Steps { + l = e.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) + } + } + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Script != nil { + l = m.Script.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DAG != nil { + l = m.DAG.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Suspend != nil { + l = m.Suspend.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.Sidecars) > 0 { + for _, e := range m.Sidecars { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.ArchiveLocation != nil { + l = m.ArchiveLocation.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ActiveDeadlineSeconds != nil { + n += 2 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + if m.RetryStrategy != nil { + l = m.RetryStrategy.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Parallelism != nil { + n += 2 + sovGenerated(uint64(*m.Parallelism)) + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SchedulerName) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.PriorityClassName) + n += 2 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 2 + sovGenerated(uint64(*m.Priority)) + } + l = len(m.ServiceAccountName) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.HostAliases) > 0 { + for _, e := range m.HostAliases { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TemplateRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Template) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *UserContainer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Container.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MirrorVolumeMounts != nil { + n += 2 + } + return n +} + +func (m *ValueFrom) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.JSONPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.JQFilter) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Parameter) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Workflow) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WorkflowList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *WorkflowSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Templates) > 0 { + for _, e := range m.Templates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Entrypoint) + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.Arguments.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Parallelism != nil { + n += 1 + sovGenerated(uint64(*m.Parallelism)) + } + if m.ArtifactRepositoryRef != nil { + l = m.ArtifactRepositoryRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Suspend != nil { + n += 2 + } + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Affinity != nil { + l = m.Affinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.HostNetwork != nil { + n += 2 + } + if m.DNSPolicy != nil { + l = len(*m.DNSPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + l = len(m.OnExit) + n += 2 + l + sovGenerated(uint64(l)) + if m.TTLSecondsAfterFinished != nil { + n += 2 + sovGenerated(uint64(*m.TTLSecondsAfterFinished)) + } + if m.ActiveDeadlineSeconds != nil { + n += 2 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + if m.Priority != nil { + n += 2 + sovGenerated(uint64(*m.Priority)) + } + l = len(m.SchedulerName) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.PodPriorityClassName) + n += 2 + l + sovGenerated(uint64(l)) + if m.PodPriority != nil { + n += 2 + sovGenerated(uint64(*m.PodPriority)) + } + if len(m.HostAliases) > 0 { + for _, e := range m.HostAliases { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PodGC != nil { + l = m.PodGC.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WorkflowStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FinishedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CompressedNodes) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Nodes) > 0 { + for k, v := range m.Nodes { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.PersistentVolumeClaims) > 0 { + for _, e := range m.PersistentVolumeClaims { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Outputs != nil { + l = m.Outputs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WorkflowStep) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Template) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Arguments.Size() + n += 1 + 
l + sovGenerated(uint64(l)) + if m.TemplateRef != nil { + l = m.TemplateRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.WithItems) > 0 { + for _, e := range m.WithItems { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.WithParam) + n += 1 + l + sovGenerated(uint64(l)) + if m.WithSequence != nil { + l = m.WithSequence.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.When) + n += 1 + l + sovGenerated(uint64(l)) + if m.ContinueOn != nil { + l = m.ContinueOn.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WorkflowTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WorkflowTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *WorkflowTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Templates) > 0 { + for _, e := range m.Templates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Arguments.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ArchiveStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ArchiveStrategy{`, + `Tar:` + strings.Replace(fmt.Sprintf("%v", this.Tar), "TarStrategy", "TarStrategy", 1) + `,`, + `None:` + strings.Replace(fmt.Sprintf("%v", this.None), "NoneStrategy", "NoneStrategy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Arguments) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Arguments{`, + `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "Parameter", "Parameter", 1), `&`, ``, 1) + `,`, + `Artifacts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Artifacts), "Artifact", "Artifact", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Artifact) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Artifact{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `ArtifactLocation:` + strings.Replace(strings.Replace(this.ArtifactLocation.String(), "ArtifactLocation", "ArtifactLocation", 1), `&`, ``, 1) + `,`, + `GlobalName:` + fmt.Sprintf("%v", this.GlobalName) + `,`, + `Archive:` + strings.Replace(fmt.Sprintf("%v", this.Archive), "ArchiveStrategy", "ArchiveStrategy", 1) + `,`, + `Optional:` + fmt.Sprintf("%v", this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ArtifactLocation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ArtifactLocation{`, + `ArchiveLogs:` + valueToStringGenerated(this.ArchiveLogs) + `,`, + `S3:` + strings.Replace(fmt.Sprintf("%v", this.S3), "S3Artifact", "S3Artifact", 1) + `,`, + `Git:` + strings.Replace(fmt.Sprintf("%v", this.Git), "GitArtifact", "GitArtifact", 1) + `,`, + `HTTP:` + 
strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPArtifact", "HTTPArtifact", 1) + `,`, + `Artifactory:` + strings.Replace(fmt.Sprintf("%v", this.Artifactory), "ArtifactoryArtifact", "ArtifactoryArtifact", 1) + `,`, + `HDFS:` + strings.Replace(fmt.Sprintf("%v", this.HDFS), "HDFSArtifact", "HDFSArtifact", 1) + `,`, + `Raw:` + strings.Replace(fmt.Sprintf("%v", this.Raw), "RawArtifact", "RawArtifact", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ArtifactRepositoryRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ArtifactRepositoryRef{`, + `ConfigMap:` + fmt.Sprintf("%v", this.ConfigMap) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `}`, + }, "") + return s +} +func (this *ArtifactoryArtifact) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ArtifactoryArtifact{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `ArtifactoryAuth:` + strings.Replace(strings.Replace(this.ArtifactoryAuth.String(), "ArtifactoryAuth", "ArtifactoryAuth", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ArtifactoryAuth) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ArtifactoryAuth{`, + `UsernameSecret:` + strings.Replace(fmt.Sprintf("%v", this.UsernameSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `PasswordSecret:` + strings.Replace(fmt.Sprintf("%v", this.PasswordSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContinueOn) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContinueOn{`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `}`, + }, "") + return s +} +func (this *DAGTask) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DAGTask{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Template:` + fmt.Sprintf("%v", this.Template) + `,`, + `Arguments:` + strings.Replace(strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1), `&`, ``, 1) + `,`, + `TemplateRef:` + strings.Replace(fmt.Sprintf("%v", this.TemplateRef), "TemplateRef", "TemplateRef", 1) + `,`, + `Dependencies:` + fmt.Sprintf("%v", this.Dependencies) + `,`, + `WithItems:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.WithItems), "Item", "Item", 1), `&`, ``, 1) + `,`, + `WithParam:` + fmt.Sprintf("%v", this.WithParam) + `,`, + `WithSequence:` + strings.Replace(fmt.Sprintf("%v", this.WithSequence), "Sequence", "Sequence", 1) + `,`, + `When:` + fmt.Sprintf("%v", this.When) + `,`, + `ContinueOn:` + strings.Replace(fmt.Sprintf("%v", this.ContinueOn), "ContinueOn", "ContinueOn", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DAGTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DAGTemplate{`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `Tasks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tasks), "DAGTask", "DAGTask", 1), `&`, ``, 1) + `,`, + `FailFast:` + valueToStringGenerated(this.FailFast) + `,`, + `}`, + }, "") + return s +} +func (this *GitArtifact) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitArtifact{`, + `Repo:` + fmt.Sprintf("%v", this.Repo) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `Depth:` + valueToStringGenerated(this.Depth) + `,`, + `Fetch:` + fmt.Sprintf("%v", this.Fetch) + `,`, + `UsernameSecret:` + 
strings.Replace(fmt.Sprintf("%v", this.UsernameSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `PasswordSecret:` + strings.Replace(fmt.Sprintf("%v", this.PasswordSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `SSHPrivateKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.SSHPrivateKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `InsecureIgnoreHostKey:` + fmt.Sprintf("%v", this.InsecureIgnoreHostKey) + `,`, + `}`, + }, "") + return s +} +func (this *HDFSArtifact) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HDFSArtifact{`, + `HDFSConfig:` + strings.Replace(strings.Replace(this.HDFSConfig.String(), "HDFSConfig", "HDFSConfig", 1), `&`, ``, 1) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Force:` + fmt.Sprintf("%v", this.Force) + `,`, + `}`, + }, "") + return s +} +func (this *HDFSConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HDFSConfig{`, + `HDFSKrbConfig:` + strings.Replace(strings.Replace(this.HDFSKrbConfig.String(), "HDFSKrbConfig", "HDFSKrbConfig", 1), `&`, ``, 1) + `,`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `HDFSUser:` + fmt.Sprintf("%v", this.HDFSUser) + `,`, + `}`, + }, "") + return s +} +func (this *HDFSKrbConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HDFSKrbConfig{`, + `KrbCCacheSecret:` + strings.Replace(fmt.Sprintf("%v", this.KrbCCacheSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `KrbKeytabSecret:` + strings.Replace(fmt.Sprintf("%v", this.KrbKeytabSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `KrbUsername:` + fmt.Sprintf("%v", this.KrbUsername) + `,`, + `KrbRealm:` + fmt.Sprintf("%v", this.KrbRealm) + `,`, + `KrbConfigConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.KrbConfigConfigMap), "ConfigMapKeySelector", "v1.ConfigMapKeySelector", 1) + `,`, + `KrbServicePrincipalName:` + fmt.Sprintf("%v", this.KrbServicePrincipalName) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPArtifact) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPArtifact{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `}`, + }, "") + return s +} +func (this *Inputs) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Inputs{`, + `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "Parameter", "Parameter", 1), `&`, ``, 1) + `,`, + `Artifacts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Artifacts), "Artifact", "Artifact", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Metadata) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + 
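// NOTE (editorial): the String() methods in this stretch sort map keys before
// formatting (as in the Annotations/Labels loops here), so equal objects
// always stringify identically -- useful for tests and log diffing. Minimal
// sketch of the same idea:
package main

import (
	"fmt"
	"sort"
	"strings"
)

func mapString(m map[string]string) string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var b strings.Builder
	b.WriteString("map[string]string{")
	for _, k := range keys {
		fmt.Fprintf(&b, "%v: %v,", k, m[k])
	}
	b.WriteString("}")
	return b.String()
}

func main() {
	fmt.Println(mapString(map[string]string{"b": "2", "a": "1"}))
	// map[string]string{a: 1,b: 2,}
}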
mapStringForLabels += "}" + s := strings.Join([]string{`&Metadata{`, + `Annotations:` + mapStringForAnnotations + `,`, + `Labels:` + mapStringForLabels + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeStatus{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `TemplateName:` + fmt.Sprintf("%v", this.TemplateName) + `,`, + `TemplateRef:` + strings.Replace(fmt.Sprintf("%v", this.TemplateRef), "TemplateRef", "TemplateRef", 1) + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `BoundaryID:` + fmt.Sprintf("%v", this.BoundaryID) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(strings.Replace(this.FinishedAt.String(), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, + `Daemoned:` + valueToStringGenerated(this.Daemoned) + `,`, + `Inputs:` + strings.Replace(fmt.Sprintf("%v", this.Inputs), "Inputs", "Inputs", 1) + `,`, + `Outputs:` + strings.Replace(fmt.Sprintf("%v", this.Outputs), "Outputs", "Outputs", 1) + `,`, + `Children:` + fmt.Sprintf("%v", this.Children) + `,`, + `OutboundNodes:` + fmt.Sprintf("%v", this.OutboundNodes) + `,`, + `}`, + }, "") + return s +} +func (this *NoneStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NoneStrategy{`, + `}`, + }, "") + return s +} +func (this *Outputs) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Outputs{`, + `Parameters:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Parameters), "Parameter", "Parameter", 1), `&`, ``, 1) + `,`, + `Artifacts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Artifacts), "Artifact", "Artifact", 1), `&`, ``, 1) + `,`, + `Result:` + valueToStringGenerated(this.Result) + `,`, + `}`, + }, "") + return s +} +func (this *ParallelSteps) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ParallelSteps{`, + `Steps:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Steps), "WorkflowStep", "WorkflowStep", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Parameter) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Parameter{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Default:` + valueToStringGenerated(this.Default) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `ValueFrom:` + strings.Replace(fmt.Sprintf("%v", this.ValueFrom), "ValueFrom", "ValueFrom", 1) + `,`, + `GlobalName:` + fmt.Sprintf("%v", this.GlobalName) + `,`, + `}`, + }, "") + return s +} +func (this *PodGC) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodGC{`, + `Strategy:` + fmt.Sprintf("%v", this.Strategy) + `,`, + `}`, + }, "") + return s +} +func (this *RawArtifact) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RawArtifact{`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceTemplate{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + 
`MergeStrategy:` + fmt.Sprintf("%v", this.MergeStrategy) + `,`, + `Manifest:` + fmt.Sprintf("%v", this.Manifest) + `,`, + `SetOwnerReference:` + fmt.Sprintf("%v", this.SetOwnerReference) + `,`, + `SuccessCondition:` + fmt.Sprintf("%v", this.SuccessCondition) + `,`, + `FailureCondition:` + fmt.Sprintf("%v", this.FailureCondition) + `,`, + `}`, + }, "") + return s +} +func (this *RetryStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RetryStrategy{`, + `Limit:` + valueToStringGenerated(this.Limit) + `,`, + `}`, + }, "") + return s +} +func (this *S3Artifact) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&S3Artifact{`, + `S3Bucket:` + strings.Replace(strings.Replace(this.S3Bucket.String(), "S3Bucket", "S3Bucket", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `}`, + }, "") + return s +} +func (this *S3Bucket) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&S3Bucket{`, + `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, + `Bucket:` + fmt.Sprintf("%v", this.Bucket) + `,`, + `Region:` + fmt.Sprintf("%v", this.Region) + `,`, + `Insecure:` + valueToStringGenerated(this.Insecure) + `,`, + `AccessKeySecret:` + strings.Replace(strings.Replace(this.AccessKeySecret.String(), "SecretKeySelector", "v1.SecretKeySelector", 1), `&`, ``, 1) + `,`, + `SecretKeySecret:` + strings.Replace(strings.Replace(this.SecretKeySecret.String(), "SecretKeySelector", "v1.SecretKeySelector", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScriptTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScriptTemplate{`, + `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "v1.Container", 1), `&`, ``, 1) + `,`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `}`, + }, "") + return s +} +func (this *Sequence) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sequence{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Start:` + fmt.Sprintf("%v", this.Start) + `,`, + `End:` + fmt.Sprintf("%v", this.End) + `,`, + `Format:` + fmt.Sprintf("%v", this.Format) + `,`, + `}`, + }, "") + return s +} +func (this *SuspendTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SuspendTemplate{`, + `}`, + }, "") + return s +} +func (this *TarStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TarStrategy{`, + `}`, + }, "") + return s +} +func (this *Template) String() string { + if this == nil { + return "nil" + } + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Template{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Template:` + fmt.Sprintf("%v", this.Template) + `,`, + `Arguments:` + strings.Replace(strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1), `&`, ``, 1) + `,`, + `TemplateRef:` + strings.Replace(fmt.Sprintf("%v", this.TemplateRef), "TemplateRef", "TemplateRef", 1) + `,`, + `Inputs:` + 
strings.Replace(strings.Replace(this.Inputs.String(), "Inputs", "Inputs", 1), `&`, ``, 1) + `,`, + `Outputs:` + strings.Replace(strings.Replace(this.Outputs.String(), "Outputs", "Outputs", 1), `&`, ``, 1) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, + `Metadata:` + strings.Replace(strings.Replace(this.Metadata.String(), "Metadata", "Metadata", 1), `&`, ``, 1) + `,`, + `Daemon:` + valueToStringGenerated(this.Daemon) + `,`, + `Steps:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Steps), "ParallelSteps", "ParallelSteps", 1), `&`, ``, 1) + `,`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "Container", "v1.Container", 1) + `,`, + `Script:` + strings.Replace(fmt.Sprintf("%v", this.Script), "ScriptTemplate", "ScriptTemplate", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceTemplate", "ResourceTemplate", 1) + `,`, + `DAG:` + strings.Replace(fmt.Sprintf("%v", this.DAG), "DAGTemplate", "DAGTemplate", 1) + `,`, + `Suspend:` + strings.Replace(fmt.Sprintf("%v", this.Suspend), "SuspendTemplate", "SuspendTemplate", 1) + `,`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "v1.Volume", 1), `&`, ``, 1) + `,`, + `InitContainers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainers), "UserContainer", "UserContainer", 1), `&`, ``, 1) + `,`, + `Sidecars:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sidecars), "UserContainer", "UserContainer", 1), `&`, ``, 1) + `,`, + `ArchiveLocation:` + strings.Replace(fmt.Sprintf("%v", this.ArchiveLocation), "ArtifactLocation", "ArtifactLocation", 1) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `RetryStrategy:` + strings.Replace(fmt.Sprintf("%v", this.RetryStrategy), "RetryStrategy", "RetryStrategy", 1) + `,`, + `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, + `Tolerations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tolerations), "Toleration", "v1.Toleration", 1), `&`, ``, 1) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `HostAliases:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostAliases), "HostAlias", "v1.HostAlias", 1), `&`, ``, 1) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TemplateRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TemplateRef{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Template:` + fmt.Sprintf("%v", this.Template) + `,`, + `RuntimeResolution:` + fmt.Sprintf("%v", this.RuntimeResolution) + `,`, + `}`, + }, "") + return s +} +func (this *UserContainer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserContainer{`, + `Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "v1.Container", 1), `&`, ``, 1) + `,`, + `MirrorVolumeMounts:` + valueToStringGenerated(this.MirrorVolumeMounts) + `,`, + `}`, + }, "") + return s +} +func (this *ValueFrom) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ValueFrom{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `JSONPath:` + fmt.Sprintf("%v", this.JSONPath) + `,`, + `JQFilter:` + fmt.Sprintf("%v", this.JQFilter) + `,`, + `Parameter:` + fmt.Sprintf("%v", this.Parameter) + `,`, + `}`, + }, "") + return s +} +func (this *Workflow) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Workflow{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "WorkflowSpec", "WorkflowSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "WorkflowStatus", "WorkflowStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WorkflowList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WorkflowList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Workflow", "Workflow", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WorkflowSpec) String() string { + if this == nil { + return "nil" + } + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&WorkflowSpec{`, + `Templates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Templates), "Template", "Template", 1), `&`, ``, 1) + `,`, + `Entrypoint:` + fmt.Sprintf("%v", this.Entrypoint) + `,`, + `Arguments:` + strings.Replace(strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1), `&`, ``, 1) + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "v1.Volume", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, + `ArtifactRepositoryRef:` + strings.Replace(fmt.Sprintf("%v", this.ArtifactRepositoryRef), "ArtifactRepositoryRef", "ArtifactRepositoryRef", 1) + `,`, + `Suspend:` + valueToStringGenerated(this.Suspend) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, + `Tolerations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tolerations), "Toleration", "v1.Toleration", 1), `&`, ``, 1) + `,`, + `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "v1.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `HostNetwork:` + valueToStringGenerated(this.HostNetwork) + `,`, + `DNSPolicy:` + valueToStringGenerated(this.DNSPolicy) + `,`, + `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "v1.PodDNSConfig", 1) + `,`, + `OnExit:` + fmt.Sprintf("%v", this.OnExit) + `,`, + 
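// Reviewer sketch: WorkflowSpec here (like Metadata and Template earlier, and
// WorkflowStatus below) renders its map fields by sorting the keys first,
// because Go map iteration order is randomized and String() output must be
// deterministic. The same pattern, shown with the standard library's
// sort.Strings in place of gogo's sortkeys.Strings:
//
//	keys := make([]string, 0, len(this.NodeSelector))
//	for k := range this.NodeSelector {
//		keys = append(keys, k)
//	}
//	sort.Strings(keys) // fixed key order => stable output
//	out := "map[string]string{"
//	for _, k := range keys {
//		out += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k])
//	}
//	out += "}"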
`TTLSecondsAfterFinished:` + valueToStringGenerated(this.TTLSecondsAfterFinished) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `PodPriorityClassName:` + fmt.Sprintf("%v", this.PodPriorityClassName) + `,`, + `PodPriority:` + valueToStringGenerated(this.PodPriority) + `,`, + `HostAliases:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostAliases), "HostAlias", "v1.HostAlias", 1), `&`, ``, 1) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, + `PodGC:` + strings.Replace(fmt.Sprintf("%v", this.PodGC), "PodGC", "PodGC", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WorkflowStatus) String() string { + if this == nil { + return "nil" + } + keysForNodes := make([]string, 0, len(this.Nodes)) + for k := range this.Nodes { + keysForNodes = append(keysForNodes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodes) + mapStringForNodes := "map[string]NodeStatus{" + for _, k := range keysForNodes { + mapStringForNodes += fmt.Sprintf("%v: %v,", k, this.Nodes[k]) + } + mapStringForNodes += "}" + s := strings.Join([]string{`&WorkflowStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(strings.Replace(this.FinishedAt.String(), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `CompressedNodes:` + fmt.Sprintf("%v", this.CompressedNodes) + `,`, + `Nodes:` + mapStringForNodes + `,`, + `PersistentVolumeClaims:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PersistentVolumeClaims), "Volume", "v1.Volume", 1), `&`, ``, 1) + `,`, + `Outputs:` + strings.Replace(fmt.Sprintf("%v", this.Outputs), "Outputs", "Outputs", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WorkflowStep) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WorkflowStep{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Template:` + fmt.Sprintf("%v", this.Template) + `,`, + `Arguments:` + strings.Replace(strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1), `&`, ``, 1) + `,`, + `TemplateRef:` + strings.Replace(fmt.Sprintf("%v", this.TemplateRef), "TemplateRef", "TemplateRef", 1) + `,`, + `WithItems:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.WithItems), "Item", "Item", 1), `&`, ``, 1) + `,`, + `WithParam:` + fmt.Sprintf("%v", this.WithParam) + `,`, + `WithSequence:` + strings.Replace(fmt.Sprintf("%v", this.WithSequence), "Sequence", "Sequence", 1) + `,`, + `When:` + fmt.Sprintf("%v", this.When) + `,`, + `ContinueOn:` + strings.Replace(fmt.Sprintf("%v", this.ContinueOn), "ContinueOn", "ContinueOn", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WorkflowTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WorkflowTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "WorkflowTemplateSpec", "WorkflowTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WorkflowTemplateList) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&WorkflowTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "WorkflowTemplate", "WorkflowTemplate", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WorkflowTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WorkflowTemplateSpec{`, + `Templates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Templates), "Template", "Template", 1), `&`, ``, 1) + `,`, + `Arguments:` + strings.Replace(strings.Replace(this.Arguments.String(), "Arguments", "Arguments", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ArchiveStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArchiveStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArchiveStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tar", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tar == nil { + m.Tar = &TarStrategy{} + } + if err := m.Tar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field None", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.None == nil { + m.None = &NoneStrategy{} + } + if err := m.None.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Arguments) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
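// Reviewer sketch: each Unmarshal in this file is a hand-rolled protobuf
// wire-format reader. The recurring inner loop is base-128 varint decoding:
// each byte contributes its low 7 bits, and a clear high bit marks the final
// byte. The decoded key then splits into field number and wire type. Bounds
// and overflow checks are elided here for brevity; the generated code keeps
// them:
//
//	var wire uint64
//	for shift := uint(0); ; shift += 7 {
//		b := dAtA[iNdEx]
//		iNdEx++
//		wire |= uint64(b&0x7F) << shift // accumulate 7 payload bits
//		if b < 0x80 {                   // high bit clear: last byte
//			break
//		}
//	}
//	fieldNum := int32(wire >> 3) // e.g. 1 for Parameters
//	wireType := int(wire & 0x7)  // 0 = varint, 2 = length-delimited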
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Arguments: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Arguments: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, Parameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifacts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artifacts = append(m.Artifacts, Artifact{}) + if err := m.Artifacts[len(m.Artifacts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Artifact) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Artifact: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Artifact: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactLocation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ArtifactLocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GlobalName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Archive", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Archive == nil { + m.Archive = &ArchiveStrategy{} + } + if err := m.Archive.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Optional = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactLocation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactLocation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactLocation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ArchiveLogs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ArchiveLogs = &b + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field S3", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.S3 == nil { + m.S3 = &S3Artifact{} + } + if err := m.S3.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Git == nil { + m.Git = &GitArtifact{} + } + if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTP == nil { + m.HTTP = &HTTPArtifact{} + } + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifactory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Artifactory == nil { + m.Artifactory = &ArtifactoryArtifact{} + } + if err := m.Artifactory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HDFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HDFS == nil { + m.HDFS = &HDFSArtifact{} + } + if err := m.HDFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Raw == nil { + m.Raw = &RawArtifact{} + } + if err := m.Raw.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { 
+ return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactRepositoryRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactRepositoryRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactRepositoryRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigMap = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactoryArtifact) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactoryArtifact: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactoryArtifact: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactoryAuth", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ArtifactoryAuth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtifactoryAuth) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtifactoryAuth: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtifactoryAuth: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsernameSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UsernameSecret == nil { + m.UsernameSecret = &v1.SecretKeySelector{} + } + if err := m.UsernameSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PasswordSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + if m.PasswordSecret == nil { + m.PasswordSecret = &v1.SecretKeySelector{} + } + if err := m.PasswordSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContinueOn) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContinueOn: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContinueOn: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Error = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Failed = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DAGTask) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DAGTask: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DAGTask: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } 
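// Reviewer sketch: length-delimited fields (wireType 2) are decoded by
// reading a varint length, bounding a sub-slice [iNdEx:postIndex], and
// recursing into the field's own Unmarshal. Two shapes recur throughout
// this file, both visible in DAGTask here:
//
//	// optional nested message: allocate lazily, then decode in place
//	if m.TemplateRef == nil {
//		m.TemplateRef = &TemplateRef{}
//	}
//	if err := m.TemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
//		return err
//	}
//
//	// repeated nested message: append a zero value, decode into the tail
//	m.WithItems = append(m.WithItems, Item{})
//	if err := m.WithItems[len(m.WithItems)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
//		return err
//	}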
+ postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Template = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Arguments.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TemplateRef == nil { + m.TemplateRef = &TemplateRef{} + } + if err := m.TemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dependencies = append(m.Dependencies, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WithItems", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WithItems = append(m.WithItems, Item{}) + if err := m.WithItems[len(m.WithItems)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WithParam", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WithParam = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WithSequence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WithSequence == nil { + m.WithSequence = &Sequence{} + } + if err := m.WithSequence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field When", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.When = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContinueOn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContinueOn == nil { + m.ContinueOn = &ContinueOn{} + } + if err := m.ContinueOn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DAGTemplate) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DAGTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DAGTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Target = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, DAGTask{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailFast", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.FailFast = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitArtifact) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitArtifact: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitArtifact: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Repo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Depth", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Depth = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fetch", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fetch = append(m.Fetch, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsernameSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UsernameSecret == nil { + m.UsernameSecret = &v1.SecretKeySelector{} + } + if err := m.UsernameSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PasswordSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PasswordSecret == nil {
+				m.PasswordSecret = &v1.SecretKeySelector{}
+			}
+			if err := m.PasswordSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SSHPrivateKeySecret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SSHPrivateKeySecret == nil {
+				m.SSHPrivateKeySecret = &v1.SecretKeySelector{}
+			}
+			if err := m.SSHPrivateKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InsecureIgnoreHostKey", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.InsecureIgnoreHostKey = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HDFSArtifact) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HDFSArtifact: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HDFSArtifact: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HDFSConfig", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.HDFSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Force = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HDFSConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HDFSConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HDFSConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HDFSKrbConfig", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.HDFSKrbConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HDFSUser", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.HDFSUser = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HDFSKrbConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HDFSKrbConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HDFSKrbConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KrbCCacheSecret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.KrbCCacheSecret == nil {
+				m.KrbCCacheSecret = &v1.SecretKeySelector{}
+			}
+			if err := m.KrbCCacheSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KrbKeytabSecret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.KrbKeytabSecret == nil {
+				m.KrbKeytabSecret = &v1.SecretKeySelector{}
+			}
+			if err := m.KrbKeytabSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KrbUsername", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.KrbUsername = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KrbRealm", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.KrbRealm = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KrbConfigConfigMap", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.KrbConfigConfigMap == nil {
+				m.KrbConfigConfigMap = &v1.ConfigMapKeySelector{}
+			}
+			if err := m.KrbConfigConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KrbServicePrincipalName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.KrbServicePrincipalName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HTTPArtifact) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HTTPArtifact: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HTTPArtifact: illegal tag %d (wire type %d)", fieldNum, wire)
} + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Inputs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Inputs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Inputs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, Parameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifacts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artifacts = append(m.Artifacts, Artifact{}) + if err := m.Artifacts[len(m.Artifacts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + 
} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Item) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Item: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Item: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NumVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NumVal = encoding_json.Number(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolVal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BoolVal = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StrVal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MapVal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MapVal == nil { + m.MapVal = make(map[string]ItemValue) + } + var mapkey string 
+ mapvalue := &ItemValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ItemValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MapVal[mapkey] = *mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListVal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ListVal = append(m.ListVal, ItemValue{}) + if err := m.ListVal[len(m.ListVal)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ItemValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ItemValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ItemValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NumVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NumVal = encoding_json.Number(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolVal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BoolVal = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StrVal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MapVal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MapVal == nil { + m.MapVal = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MapVal[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListVal", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ListVal = append(m.ListVal, make([]byte, postIndex-iNdEx)) + copy(m.ListVal[len(m.ListVal)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx 
= postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DisplayName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = NodeType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TemplateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TemplateRef == nil { + m.TemplateRef = &TemplateRef{} + } + if err := m.TemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = NodePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BoundaryID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BoundaryID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Daemoned", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Daemoned = &b + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Inputs == nil { + m.Inputs = &Inputs{} + } + if err := m.Inputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Outputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Outputs == nil { + m.Outputs = &Outputs{} + } + if err := m.Outputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Children", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Children = append(m.Children, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OutboundNodes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OutboundNodes = append(m.OutboundNodes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NoneStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NoneStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NoneStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Outputs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Outputs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Outputs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, Parameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artifacts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artifacts = append(m.Artifacts, Artifact{}) + if err := m.Artifacts[len(m.Artifacts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Result = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParallelSteps) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParallelSteps: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParallelSteps: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Steps = append(m.Steps, WorkflowStep{}) + if err := m.Steps[len(m.Steps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Parameter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Parameter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Parameter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Default = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValueFrom == nil { + m.ValueFrom = &ValueFrom{} + } + if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GlobalName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodGC) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodGC: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodGC: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Strategy = PodGCStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RawArtifact) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawArtifact: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawArtifact: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MergeStrategy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MergeStrategy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Manifest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Manifest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field SetOwnerReference", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SetOwnerReference = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessCondition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SuccessCondition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureCondition", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FailureCondition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RetryStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RetryStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RetryStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Limit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + 
} + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *S3Artifact) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: S3Artifact: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: S3Artifact: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field S3Bucket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.S3Bucket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *S3Bucket) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: S3Bucket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: S3Bucket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
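+	// Wire type 2 (length-delimited): the varint decoded just above is the
+	// byte length of the payload that follows. Illustrative example, not
+	// taken from this message: the bytes 0x0a 0x03 'f' 'o' 'o' encode
+	// field 1 as the string "foo" (key = (1<<3)|2 = 0x0a, length = 3,
+	// payload = "foo").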
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bucket", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bucket = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Region", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Region = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Insecure = &b + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessKeySecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AccessKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeySecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SecretKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if 
err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScriptTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScriptTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScriptTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sequence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sequence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sequence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Count = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Start = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.End = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Format = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SuspendTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SuspendTemplate: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SuspendTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TarStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TarStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TarStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Template) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Template: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Template: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + m.Template = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Arguments.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TemplateRef == nil { + m.TemplateRef = &TemplateRef{} + } + if err := m.TemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Inputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Outputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Outputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey 
string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Affinity == nil { + m.Affinity = &v1.Affinity{} + } + if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 
0 { + return fmt.Errorf("proto: wrong wireType = %d for field Daemon", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Daemon = &b + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Steps = append(m.Steps, ParallelSteps{}) + if err := m.Steps[len(m.Steps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Container == nil { + m.Container = &v1.Container{} + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Script", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Script == nil { + m.Script = &ScriptTemplate{} + } + if err := m.Script.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceTemplate{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DAG", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DAG == nil { + m.DAG = &DAGTemplate{} + } + if err := m.DAG.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Suspend == nil { + m.Suspend = &SuspendTemplate{} + } + if err := m.Suspend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, v1.Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainers = append(m.InitContainers, UserContainer{}) + if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sidecars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sidecars = append(m.Sidecars, UserContainer{}) + if err := m.Sidecars[len(m.Sidecars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ArchiveLocation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ArchiveLocation == nil { + m.ArchiveLocation = &ArtifactLocation{} + } + if err := m.ArchiveLocation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RetryStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RetryStrategy == nil { + m.RetryStrategy = &RetryStrategy{} + } + if err := m.RetryStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Parallelism = &v + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v1.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchedulerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PriorityClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Priority = &v + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 29: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostAliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostAliases = append(m.HostAliases, v1.HostAlias{}) + if err := m.HostAliases[len(m.HostAliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 30: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &v1.PodSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Template = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeResolution", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RuntimeResolution = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserContainer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserContainer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserContainer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MirrorVolumeMounts", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.MirrorVolumeMounts = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValueFrom) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValueFrom: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValueFrom: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
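+	// Varint accumulation: each byte contributes its low seven bits,
+	// least-significant group first, and the high bit marks continuation.
+	// For example, the bytes 0x96 0x01 decode to (0x16) | (0x01 << 7) = 150.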
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JSONPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JQFilter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JQFilter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Workflow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Workflow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Workflow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Workflow{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Templates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Templates = append(m.Templates, Template{}) + if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entrypoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entrypoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Arguments.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, v1.Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v1.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Parallelism = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtifactRepositoryRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ArtifactRepositoryRef == nil { + m.ArtifactRepositoryRef = &ArtifactRepositoryRef{} + } + if err := m.ArtifactRepositoryRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Suspend = &b + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } 
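+ // Map fields such as NodeSelector arrive on the wire as repeated nested entries,
+ // each a small message carrying the key on field 1 and the value on field 2; the
+ // loop above decodes every entry and folds it into the Go map. Embedded message
+ // fields like Affinity below are length-delimited: the varint msglen bounds the
+ // sub-slice, the pointer is allocated lazily if nil, and decoding is delegated to
+ // the field type's own Unmarshal.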
+ if m.Affinity == nil { + m.Affinity = &v1.Affinity{} + } + if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v1.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, v1.LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.HostNetwork = &b + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.DNSPolicy(dAtA[iNdEx:postIndex]) + m.DNSPolicy = &s + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DNSConfig == nil { + m.DNSConfig = &v1.PodDNSConfig{} + } + if err := 
m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OnExit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OnExit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTLSecondsAfterFinished", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TTLSecondsAfterFinished = &v + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Priority = &v + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchedulerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodPriorityClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodPriorityClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 23: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodPriority", wireType) + } + var v int32 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PodPriority = &v + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostAliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostAliases = append(m.HostAliases, v1.HostAlias{}) + if err := m.HostAliases[len(m.HostAliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &v1.PodSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodGC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodGC == nil { + m.PodGC = &PodGC{} + } + if err := m.PodGC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowStatus: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = NodePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompressedNodes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CompressedNodes = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Nodes == nil { + m.Nodes = make(map[string]NodeStatus) + } + var mapkey string + mapvalue := &NodeStatus{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &NodeStatus{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Nodes[mapkey] = *mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PersistentVolumeClaims = append(m.PersistentVolumeClaims, v1.Volume{}) + if err := m.PersistentVolumeClaims[len(m.PersistentVolumeClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Outputs", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Outputs == nil { + m.Outputs = &Outputs{} + } + if err := m.Outputs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowStep) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowStep: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowStep: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Template = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Arguments.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TemplateRef == nil { + m.TemplateRef = &TemplateRef{} + } + if err := m.TemplateRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WithItems", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WithItems = append(m.WithItems, Item{}) + if err := m.WithItems[len(m.WithItems)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WithParam", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WithParam = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WithSequence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WithSequence == nil { + m.WithSequence = &Sequence{} + } + if err := m.WithSequence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field When", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.When = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContinueOn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContinueOn == nil { + m.ContinueOn = &ContinueOn{} + } + if err := m.ContinueOn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err 
+ } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, WorkflowTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Templates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Templates = append(m.Templates, Template{}) + if err := m.Templates[len(m.Templates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Arguments.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + 
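+ // The wire types handled above are exhaustive: 0 varint, 1 fixed64 (8 bytes),
+ // 2 length-delimited, 3/4 deprecated start/end group, 5 fixed32 (4 bytes).
+ // Any other value cannot appear in a valid protobuf stream, so it is an error.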
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+)
diff --git a/pkg/apis/workflow/v1alpha1/generated.proto b/pkg/apis/workflow/v1alpha1/generated.proto
new file mode 100644
index 000000000000..fd9c7c521b84
--- /dev/null
+++ b/pkg/apis/workflow/v1alpha1/generated.proto
@@ -0,0 +1,889 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package github.com.argoproj.argo.pkg.apis.workflow.v1alpha1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// ArchiveStrategy describes how to archive files/directory when saving artifacts
+message ArchiveStrategy {
+  optional TarStrategy tar = 1;
+
+  optional NoneStrategy none = 2;
+}
+
+// Arguments to a template
+message Arguments {
+  // Parameters is the list of parameters to pass to the template or workflow
+  repeated Parameter parameters = 1;
+
+  // Artifacts is the list of artifacts to pass to the template or workflow
+  repeated Artifact artifacts = 2;
+}
+
+// Artifact indicates an artifact to place at a specified path
+message Artifact {
+  // name of the artifact. must be unique within a template's inputs/outputs.
+  optional string name = 1;
+
+  // Path is the container path to the artifact
+  optional string path = 2;
+
+  // mode bits to use on this file, must be a value between 0 and 0777
+  // set when loading input artifacts.
+  optional int32 mode = 3;
+
+  // From allows an artifact to reference an artifact from a previous step
+  optional string from = 4;
+
+  // ArtifactLocation contains the location of the artifact
+  optional ArtifactLocation artifactLocation = 5;
+
+  // GlobalName exports an output artifact to the global scope, making it available as
+  // '{{workflow.outputs.artifacts.XXXX}}' and in workflow.status.outputs.artifacts
+  optional string globalName = 6;
+
+  // Archive controls how the artifact will be saved to the artifact repository.
+  optional ArchiveStrategy archive = 7;
+
+  // Make the artifact optional, in case it is not generated or does not exist
+  optional bool optional = 8;
+}
+
+// ArtifactLocation describes a location for a single or multiple artifacts.
+// It is used as a single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname).
+// It is also used to describe the location of multiple artifacts such as the archive location
+// of a single workflow step, which the executor will use as a default location to store its files.
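+// Only one storage-specific field (s3, git, http, artifactory, hdfs, or raw) is
+// expected to be set on a given location; whichever is set determines the
+// artifact driver the executor uses.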
+message ArtifactLocation {
+  // ArchiveLogs indicates if the container logs should be archived
+  optional bool archiveLogs = 1;
+
+  // S3 contains S3 artifact location details
+  optional S3Artifact s3 = 2;
+
+  // Git contains git artifact location details
+  optional GitArtifact git = 3;
+
+  // HTTP contains HTTP artifact location details
+  optional HTTPArtifact http = 4;
+
+  // Artifactory contains artifactory artifact location details
+  optional ArtifactoryArtifact artifactory = 5;
+
+  // HDFS contains HDFS artifact location details
+  optional HDFSArtifact hdfs = 6;
+
+  // Raw contains raw artifact location details
+  optional RawArtifact raw = 7;
+}
+
+message ArtifactRepositoryRef {
+  optional string configMap = 1;
+
+  optional string key = 2;
+}
+
+// ArtifactoryArtifact is the location of an artifactory artifact
+message ArtifactoryArtifact {
+  // URL of the artifact
+  optional string url = 1;
+
+  optional ArtifactoryAuth artifactoryAuth = 2;
+}
+
+// ArtifactoryAuth describes the secret selectors required for authenticating to artifactory
+message ArtifactoryAuth {
+  // UsernameSecret is the secret selector to the repository username
+  optional k8s.io.api.core.v1.SecretKeySelector usernameSecret = 1;
+
+  // PasswordSecret is the secret selector to the repository password
+  optional k8s.io.api.core.v1.SecretKeySelector passwordSecret = 2;
+}
+
+// ContinueOn defines if a workflow should continue even if a task or step fails/errors.
+// It specifies whether the workflow should continue when the pod errors, fails, or both.
+message ContinueOn {
+  // +optional
+  optional bool error = 1;
+
+  // +optional
+  optional bool failed = 2;
+}
+
+// DAGTask represents a node in the graph during DAG execution
+message DAGTask {
+  // Name is the name of the target
+  optional string name = 1;
+
+  // Name of template to execute
+  optional string template = 2;
+
+  // Arguments are the parameter and artifact arguments to the template
+  optional Arguments arguments = 3;
+
+  // TemplateRef is the reference to the template resource to execute.
+  optional TemplateRef templateRef = 4;
+
+  // Dependencies are the names of other targets which this task depends on
+  repeated string dependencies = 5;
+
+  // WithItems expands a task into multiple parallel tasks from the items in the list
+  repeated Item withItems = 6;
+
+  // WithParam expands a task into multiple parallel tasks from the value in the parameter,
+  // which is expected to be a JSON list.
+  optional string withParam = 7;
+
+  // WithSequence expands a task into a numeric sequence
+  optional Sequence withSequence = 8;
+
+  // When is an expression under which the task should conditionally execute
+  optional string when = 9;
+
+  // ContinueOn makes argo proceed with the following step even if this step fails.
+  // Errors and Failed states can be specified
+  optional ContinueOn continueOn = 10;
+}
+
+// DAGTemplate is a template subtype for directed acyclic graph templates
+message DAGTemplate {
+  // Target is one or more names of targets to execute in a DAG
+  optional string target = 1;
+
+  // Tasks are a list of DAG tasks
+  repeated DAGTask tasks = 2;
+
+  // This flag is for DAG logic. The DAG logic has a built-in "fail fast" feature to stop scheduling new steps,
+  // as soon as it detects that one of the DAG nodes has failed. Then it waits until all DAG nodes are completed
+  // before failing the DAG itself.
+  // The FailFast flag defaults to true; if set to false, it allows a DAG to run all branches of the DAG to
+  // completion (either success or failure), regardless of the failed outcomes of branches in the DAG.
+  // More info and an example about this feature at https://github.com/argoproj/argo/issues/1442
+  optional bool failFast = 3;
+}
+
+// GitArtifact is the location of a git artifact
+message GitArtifact {
+  // Repo is the git repository
+  optional string repo = 1;
+
+  // Revision is the git commit, tag, or branch to checkout
+  optional string revision = 2;
+
+  // Depth specifies clones/fetches should be shallow and include the given
+  // number of commits from the branch tip
+  optional uint64 depth = 3;
+
+  // Fetch specifies a number of refs that should be fetched before checkout
+  repeated string fetch = 4;
+
+  // UsernameSecret is the secret selector to the repository username
+  optional k8s.io.api.core.v1.SecretKeySelector usernameSecret = 5;
+
+  // PasswordSecret is the secret selector to the repository password
+  optional k8s.io.api.core.v1.SecretKeySelector passwordSecret = 6;
+
+  // SSHPrivateKeySecret is the secret selector to the repository ssh private key
+  optional k8s.io.api.core.v1.SecretKeySelector sshPrivateKeySecret = 7;
+
+  // InsecureIgnoreHostKey disables SSH strict host key checking during git clone
+  optional bool insecureIgnoreHostKey = 8;
+}
+
+// HDFSArtifact is the location of an HDFS artifact
+message HDFSArtifact {
+  optional HDFSConfig hDFSConfig = 1;
+
+  // Path is a file path in HDFS
+  optional string path = 2;
+
+  // Force copies a file forcibly even if it exists (default: false)
+  optional bool force = 3;
+}
+
+// HDFSConfig contains configurations for HDFS
+message HDFSConfig {
+  optional HDFSKrbConfig hDFSKrbConfig = 1;
+
+  // Addresses are the accessible addresses of HDFS name nodes
+  repeated string addresses = 2;
+
+  // HDFSUser is the user to access HDFS file system.
+  // It is ignored if either ccache or keytab is used.
+  optional string hdfsUser = 3;
+}
+
+// HDFSKrbConfig contains auth configurations for Kerberos
+message HDFSKrbConfig {
+  // KrbCCacheSecret is the secret selector for Kerberos ccache
+  // Either ccache or keytab can be set to use Kerberos.
+  optional k8s.io.api.core.v1.SecretKeySelector krbCCacheSecret = 1;
+
+  // KrbKeytabSecret is the secret selector for Kerberos keytab
+  // Either ccache or keytab can be set to use Kerberos.
+  optional k8s.io.api.core.v1.SecretKeySelector krbKeytabSecret = 2;
+
+  // KrbUsername is the Kerberos username used with Kerberos keytab
+  // It must be set if keytab is used.
+  optional string krbUsername = 3;
+
+  // KrbRealm is the Kerberos realm used with Kerberos keytab
+  // It must be set if keytab is used.
+  optional string krbRealm = 4;
+
+  // KrbConfig is the configmap selector for Kerberos config as string
+  // It must be set if either ccache or keytab is used.
+  optional k8s.io.api.core.v1.ConfigMapKeySelector krbConfigConfigMap = 5;
+
+  // KrbServicePrincipalName is the principal name of Kerberos service
+  // It must be set if either ccache or keytab is used.
+  optional string krbServicePrincipalName = 6;
+}
+
+// HTTPArtifact allows a file served on HTTP to be placed as an input artifact in a container
+message HTTPArtifact {
+  // URL of the artifact
+  optional string url = 1;
+}
+
+// Inputs are the mechanism for passing parameters, artifacts, and volumes from one template to another
+message Inputs {
+  // Parameters are a list of parameters passed as inputs
+  repeated Parameter parameters = 1;
+
+  // Artifacts are a list of artifacts passed as inputs
+  repeated Artifact artifacts = 2;
+}
+
+// Item expands a single workflow step into multiple parallel steps
+// The value of Item can be a map, string, bool, or number
+//
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:openapi-gen=true
+message Item {
+  optional int64 type = 1;
+
+  optional string numVal = 2;
+
+  optional bool boolVal = 3;
+
+  optional string strVal = 4;
+
+  map<string, ItemValue> mapVal = 5;
+
+  repeated ItemValue listVal = 6;
+}
+
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:openapi-gen=true
+message ItemValue {
+  optional int64 type = 1;
+
+  optional string numVal = 2;
+
+  optional bool boolVal = 3;
+
+  optional string strVal = 4;
+
+  map<string, string> mapVal = 5;
+
+  repeated bytes listVal = 6;
+}
+
+// Pod metadata
+message Metadata {
+  map<string, string> annotations = 1;
+
+  map<string, string> labels = 2;
+}
+
+// NodeStatus contains status information about an individual node in the workflow
+// +k8s:openapi-gen=false
+message NodeStatus {
+  // ID is a unique identifier of a node within the workflow
+  // It is implemented as a hash of the node name, which makes the ID deterministic
+  optional string id = 1;
+
+  // Name is a unique name in the node tree used to generate the node ID
+  optional string name = 2;
+
+  // DisplayName is a human readable representation of the node. Unique within a template boundary
+  optional string displayName = 3;
+
+  // Type indicates the type of node
+  optional string type = 4;
+
+  // TemplateName is the template name which this node corresponds to.
+  // Not applicable to virtual nodes (e.g. Retry, StepGroup)
+  optional string templateName = 5;
+
+  // TemplateRef is the reference to the template resource which this node corresponds to.
+  // Not applicable to virtual nodes (e.g. Retry, StepGroup)
+  optional TemplateRef templateRef = 6;
+
+  // Phase is a simple, high-level summary of where the node is in its lifecycle.
+  // Can be used as a state machine.
+  optional string phase = 7;
+
+  // BoundaryID indicates the node ID of the associated template root node to which this node belongs
+  optional string boundaryID = 8;
+
+  // A human readable message indicating details about why the node is in this condition.
+  optional string message = 9;
+
+  // Time at which this node started
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 10;
+
+  // Time at which this node completed
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 11;
+
+  // PodIP captures the IP of the pod for daemoned steps
+  optional string podIP = 12;
+
+  // Daemoned tracks whether or not this node was daemoned and needs to be terminated
+  optional bool daemoned = 13;
+
+  // Inputs captures input parameter values and artifact locations supplied to this template invocation
+  optional Inputs inputs = 14;
+
+  // Outputs captures output parameter values and artifact locations produced by this template invocation
+  optional Outputs outputs = 15;
+
+  // Children is a list of child node IDs
+  repeated string children = 16;
+
+  // OutboundNodes tracks the node IDs which are considered "outbound" nodes to a template invocation.
+  // For every invocation of a template, there are nodes which we consider as "outbound". Essentially,
+  // these are the last nodes in the execution sequence to run, before the template is considered completed.
+  // These nodes are then connected as parents to a following step.
+  //
+  // In the case of single pod steps (i.e. container, script, resource templates), this list will be nil
+  // since the pod itself is already considered the "outbound" node.
+  // In the case of DAGs, outbound nodes are the "target" tasks (tasks with no children).
+  // In the case of steps, outbound nodes are all the containers involved in the last step group.
+  // NOTE: since templates are composable, the list of outbound nodes is carried upwards when
+  // a DAG/steps template invokes another DAG/steps template. In other words, the outbound nodes of
+  // a template will be a superset of the outbound nodes of its last children.
+  repeated string outboundNodes = 17;
+}
+
+// NoneStrategy indicates to skip the tar process and upload the files or directory tree as independent
+// files. Note that if the artifact is a directory, the artifact driver must support the ability to
+// save/load the directory appropriately.
+message NoneStrategy {
+}
+
+// Outputs hold parameters, artifacts, and results from a step
+message Outputs {
+  // Parameters holds the list of output parameters produced by a step
+  repeated Parameter parameters = 1;
+
+  // Artifacts holds the list of output artifacts produced by a step
+  repeated Artifact artifacts = 2;
+
+  // Result holds the result (stdout) of a script template
+  optional string result = 3;
+}
+
+message ParallelSteps {
+  repeated WorkflowStep steps = 1;
+}
+
+// Parameter indicates a passed string parameter to a service template with an optional default value
+message Parameter {
+  // Name is the parameter name
+  optional string name = 1;
+
+  // Default is the default value to use for an input parameter if a value was not supplied
+  optional string default = 2;
+
+  // Value is the literal value to use for the parameter.
+
+// Parameter indicates a passed string parameter to a service template with an optional default value
+message Parameter {
+  // Name is the parameter name
+  optional string name = 1;
+
+  // Default is the default value to use for an input parameter if a value was not supplied
+  optional string default = 2;
+
+  // Value is the literal value to use for the parameter.
+  // If specified in the context of an input parameter, the value takes precedence over any passed values
+  optional string value = 3;
+
+  // ValueFrom is the source for the output parameter's value
+  optional ValueFrom valueFrom = 4;
+
+  // GlobalName exports an output parameter to the global scope, making it available as
+  // '{{workflow.outputs.parameters.XXXX}}' and in workflow.status.outputs.parameters
+  optional string globalName = 5;
+}
+
+// PodGC describes how to delete completed pods as they complete
+message PodGC {
+  optional string strategy = 1;
+}
+
+// RawArtifact allows raw string content to be placed as an artifact in a container
+message RawArtifact {
+  // Data is the string contents of the artifact
+  optional string data = 1;
+}
+
+// ResourceTemplate is a template subtype to manipulate kubernetes resources
+message ResourceTemplate {
+  // Action is the action to perform to the resource.
+  // Must be one of: get, create, apply, delete, replace, patch
+  optional string action = 1;
+
+  // MergeStrategy is the strategy used to merge a patch. It defaults to "strategic"
+  // Must be one of: strategic, merge, json
+  optional string mergeStrategy = 2;
+
+  // Manifest contains the kubernetes manifest
+  optional string manifest = 3;
+
+  // SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource.
+  optional bool setOwnerReference = 4;
+
+  // SuccessCondition is a label selector expression which describes the conditions
+  // of the k8s resource in which it is acceptable to proceed to the following step
+  optional string successCondition = 5;
+
+  // FailureCondition is a label selector expression which describes the conditions
+  // of the k8s resource in which the step was considered failed
+  optional string failureCondition = 6;
+}
+
+// RetryStrategy provides controls on how to retry a workflow step
+message RetryStrategy {
+  // Limit is the maximum number of attempts when retrying a container
+  optional int32 limit = 1;
+}
+
+// S3Artifact is the location of an S3 artifact
+message S3Artifact {
+  optional S3Bucket s3Bucket = 1;
+
+  // Key is the key in the bucket where the artifact resides
+  optional string key = 2;
+}
+
+// S3Bucket contains the access information required for interfacing with an S3 bucket
+message S3Bucket {
+  // Endpoint is the hostname of the bucket endpoint
+  optional string endpoint = 1;
+
+  // Bucket is the name of the bucket
+  optional string bucket = 2;
+
+  // Region contains the optional bucket region
+  optional string region = 3;
+
+  // Insecure will connect to the service without TLS
+  optional bool insecure = 4;
+
+  // AccessKeySecret is the secret selector to the bucket's access key
+  optional k8s.io.api.core.v1.SecretKeySelector accessKeySecret = 5;
+
+  // SecretKeySecret is the secret selector to the bucket's secret key
+  optional k8s.io.api.core.v1.SecretKeySelector secretKeySecret = 6;
+}
+
+// ScriptTemplate is a template subtype to enable scripting through code steps
+message ScriptTemplate {
+  optional k8s.io.api.core.v1.Container container = 1;
+
+  // Source contains the source code of the script to execute
+  optional string source = 2;
+}
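The Sequence message that follows keeps count/start/end as strings so they can carry parameter substitutions before the controller expands the sequence into items. A rough sketch of the expansion semantics the field comments describe (a hypothetical helper, not the controller's actual code):

package main

import "fmt"

// expandSequence sketches how a sequence spec could be expanded into items.
// An empty format falls back to plain decimal, per the format field's comment.
func expandSequence(start, end int, format string) []string {
	if format == "" {
		format = "%d"
	}
	step := 1
	if start > end { // a reversed range counts down
		step = -1
	}
	var items []string
	for i := start; i != end+step; i += step {
		items = append(items, fmt.Sprintf(format, i))
	}
	return items
}

func main() {
	fmt.Println(expandSequence(0, 2, ""))          // [0 1 2]
	fmt.Println(expandSequence(1, 3, "host-%02d")) // [host-01 host-02 host-03]
}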
+
+// Sequence expands a workflow step into a numeric range
+message Sequence {
+  // Count is number of elements in the sequence (default: 0). Not to be used with end
+  optional string count = 1;
+
+  // Number at which to start the sequence (default: 0)
+  optional string start = 2;
+
+  // Number at which to end the sequence (default: 0). Not to be used with Count
+  optional string end = 3;
+
+  // Format is a printf format string to format the value in the sequence
+  optional string format = 4;
+}
+
+// SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time
+message SuspendTemplate {
+}
+
+// TarStrategy will tar and gzip the file or directory when saving
+message TarStrategy {
+}
+
+// Template is a reusable and composable unit of execution in a workflow
+message Template {
+  // Name is the name of the template
+  optional string name = 1;
+
+  // Template is the name of the template which is used as the base of this template.
+  optional string template = 2;
+
+  // Arguments hold arguments to the template.
+  optional Arguments arguments = 3;
+
+  // TemplateRef is the reference to the template resource which is used as the base of this template.
+  optional TemplateRef templateRef = 4;
+
+  // Inputs describe what inputs parameters and artifacts are supplied to this template
+  optional Inputs inputs = 5;
+
+  // Outputs describe the parameters and artifacts that this template produces
+  optional Outputs outputs = 6;
+
+  // NodeSelector is a selector to schedule this step of the workflow to be
+  // run on the selected node(s). Overrides the selector set at the workflow level.
+  map<string, string> nodeSelector = 7;
+
+  // Affinity sets the pod's scheduling constraints
+  // Overrides the affinity set at the workflow level (if any)
+  optional k8s.io.api.core.v1.Affinity affinity = 8;
+
+  // Metadata sets the pod's metadata, i.e. annotations and labels
+  optional Metadata metadata = 9;
+
+  // Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness
+  optional bool daemon = 10;
+
+  // Steps define a series of sequential/parallel workflow steps
+  repeated ParallelSteps steps = 11;
+
+  // Container is the main container image to run in the pod
+  optional k8s.io.api.core.v1.Container container = 12;
+
+  // Script runs a portion of code against an interpreter
+  optional ScriptTemplate script = 13;
+
+  // Resource template subtype which can run k8s resources
+  optional ResourceTemplate resource = 14;
+
+  // DAG template subtype which runs a DAG
+  optional DAGTemplate dag = 15;
+
+  // Suspend template subtype which can suspend a workflow when reaching the step
+  optional SuspendTemplate suspend = 16;
+
+  // Volumes is a list of volumes that can be mounted by containers in a template.
+  repeated k8s.io.api.core.v1.Volume volumes = 17;
+
+  // InitContainers is a list of containers which run before the main container.
+  repeated UserContainer initContainers = 18;
+
+  // Sidecars is a list of containers which run alongside the main container
+  // Sidecars are automatically killed when the main container completes
+  repeated UserContainer sidecars = 19;
+
+  // Location in which all files related to the step will be stored (logs, artifacts, etc...).
+  // Can be overridden by individual items in Outputs. If omitted, will use the default
+  // artifact repository location configured in the controller, appended with the
+  // <workflowname>/<nodename> in the key.
+  optional ArtifactLocation archiveLocation = 20;
+
+  // Optional duration in seconds relative to the StartTime that the pod may be active on a node
+  // before the system actively tries to terminate the pod; value must be positive integer
+  // This field is only applicable to container and script templates.
+ optional int64 activeDeadlineSeconds = 21; + + // RetryStrategy describes how to retry a template when it fails + optional RetryStrategy retryStrategy = 22; + + // Parallelism limits the max total parallel pods that can execute at the same time within the + // boundaries of this template invocation. If additional steps/dag templates are invoked, the + // pods created by those templates will not be counted towards this total. + optional int64 parallelism = 23; + + // Tolerations to apply to workflow pods. + repeated k8s.io.api.core.v1.Toleration tolerations = 24; + + // If specified, the pod will be dispatched by specified scheduler. + // Or it will be dispatched by workflow scope scheduler if specified. + // If neither specified, the pod will be dispatched by default scheduler. + // +optional + optional string schedulerName = 25; + + // PriorityClassName to apply to workflow pods. + optional string priorityClassName = 26; + + // Priority to apply to workflow pods. + optional int32 priority = 27; + + // ServiceAccountName to apply to workflow pods + optional string serviceAccountName = 28; + + // HostAliases is an optional list of hosts and IPs that will be injected into the pod spec + repeated k8s.io.api.core.v1.HostAlias hostAliases = 29; + + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + optional k8s.io.api.core.v1.PodSecurityContext securityContext = 30; +} + +// TemplateRef is a reference of template resource. +message TemplateRef { + // Name is the resource name of the template. + optional string name = 1; + + // Template is the name of referred template in the resource. + optional string template = 2; + + // RuntimeResolution skips validation at creation time. + // By enabling this option, you can create the referred workflow template before the actual runtime. + optional bool runtimeResolution = 3; +} + +// UserContainer is a container specified by a user. +message UserContainer { + optional k8s.io.api.core.v1.Container container = 1; + + // MirrorVolumeMounts will mount the same volumes specified in the main container + // to the container (including artifacts), at the same mountPaths. This enables + // dind daemon to partially see the same filesystem as the main container in + // order to use features such as docker volume binding + optional bool mirrorVolumeMounts = 2; +} + +// ValueFrom describes a location in which to obtain the value to a parameter +message ValueFrom { + // Path in the container to retrieve an output parameter value from in container templates + optional string path = 1; + + // JSONPath of a resource to retrieve an output parameter value from in resource templates + optional string jsonPath = 2; + + // JQFilter expression against the resource object in resource templates + optional string jqFilter = 3; + + // Parameter reference to a step or dag task in which to retrieve an output parameter value from + // (e.g. 
'{{steps.mystep.outputs.myparam}}')
+  optional string parameter = 4;
+}
+
+// Workflow is the definition of a workflow resource
+// +genclient
+// +genclient:noStatus
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message Workflow {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  optional WorkflowSpec spec = 2;
+
+  optional WorkflowStatus status = 3;
+}
+
+// WorkflowList is list of Workflow resources
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WorkflowList {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  repeated Workflow items = 2;
+}
+
+// WorkflowSpec is the specification of a Workflow.
+message WorkflowSpec {
+  // Templates is a list of workflow templates used in a workflow
+  repeated Template templates = 1;
+
+  // Entrypoint is a template reference to the starting point of the workflow
+  optional string entrypoint = 2;
+
+  // Arguments contain the parameters and artifacts sent to the workflow entrypoint
+  // Parameters are referencable globally using the 'workflow' variable prefix.
+  // e.g. {{workflow.parameters.myparam}}
+  optional Arguments arguments = 3;
+
+  // ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.
+  optional string serviceAccountName = 4;
+
+  // Volumes is a list of volumes that can be mounted by containers in a workflow.
+  repeated k8s.io.api.core.v1.Volume volumes = 5;
+
+  // VolumeClaimTemplates is a list of claims that containers are allowed to reference.
+  // The Workflow controller will create the claims at the beginning of the workflow
+  // and delete the claims upon completion of the workflow
+  repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 6;
+
+  // Parallelism limits the max total parallel pods that can execute at the same time in a workflow
+  optional int64 parallelism = 7;
+
+  // ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config.
+  optional ArtifactRepositoryRef artifactRepositoryRef = 8;
+
+  // Suspend will suspend the workflow and prevent execution of any future steps in the workflow
+  optional bool suspend = 9;
+
+  // NodeSelector is a selector which will result in all pods of the workflow
+  // to be scheduled on the selected node(s). This is able to be overridden by
+  // a nodeSelector specified in the template.
+  map<string, string> nodeSelector = 10;
+
+  // Affinity sets the scheduling constraints for all pods in the workflow.
+  // Can be overridden by an affinity specified in the template
+  optional k8s.io.api.core.v1.Affinity affinity = 11;
+
+  // Tolerations to apply to workflow pods.
+  repeated k8s.io.api.core.v1.Toleration tolerations = 12;
+
+  // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+  // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
+  // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+  // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  repeated k8s.io.api.core.v1.LocalObjectReference imagePullSecrets = 13;
+
+  // Host networking requested for this workflow pod. Default to false.
+  optional bool hostNetwork = 14;
+
+  // Set DNS policy for the pod.
+  // Defaults to "ClusterFirst".
+  // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+  // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+  // To have DNS options set along with hostNetwork, you have to specify DNS policy
+  // explicitly to 'ClusterFirstWithHostNet'.
+  optional string dnsPolicy = 15;
+
+  // PodDNSConfig defines the DNS parameters of a pod in addition to
+  // those generated from DNSPolicy.
+  optional k8s.io.api.core.v1.PodDNSConfig dnsConfig = 16;
+
+  // OnExit is a template reference which is invoked at the end of the
+  // workflow, irrespective of the success, failure, or error of the
+  // primary workflow.
+  optional string onExit = 17;
+
+  // TTLSecondsAfterFinished limits the lifetime of a Workflow that has finished execution
+  // (Succeeded, Failed, Error). If this field is set, once the Workflow finishes, it will be
+  // deleted after ttlSecondsAfterFinished expires. If this field is unset,
+  // ttlSecondsAfterFinished will not expire. If this field is set to zero,
+  // ttlSecondsAfterFinished expires immediately after the Workflow finishes.
+  optional int32 ttlSecondsAfterFinished = 18;
+
+  // Optional duration in seconds relative to the workflow start time which the workflow is
+  // allowed to run before the controller terminates the workflow. A value of zero is used to
+  // terminate a Running workflow
+  optional int64 activeDeadlineSeconds = 19;
+
+  // Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first.
+  optional int32 priority = 20;
+
+  // Set scheduler name for all pods.
+  // Will be overridden if container/script template's scheduler name is set.
+  // Default scheduler will be used if neither specified.
+  // +optional
+  optional string schedulerName = 21;
+
+  // PodGC describes the strategy to use when deleting completed pods
+  optional PodGC podGC = 26;
+
+  // PriorityClassName to apply to workflow pods.
+  optional string podPriorityClassName = 22;
+
+  // Priority to apply to workflow pods.
+  optional int32 podPriority = 23;
+
+  // HostAliases is an optional list of hosts and IPs that will be injected into the pod spec
+  repeated k8s.io.api.core.v1.HostAlias hostAliases = 24;
+
+  // SecurityContext holds pod-level security attributes and common container settings.
+  // Optional: Defaults to empty. See type description for default values of each field.
+  // +optional
+  optional k8s.io.api.core.v1.PodSecurityContext securityContext = 25;
+}
+
+// WorkflowStatus contains overall status information about a workflow
+// +k8s:openapi-gen=false
+message WorkflowStatus {
+  // Phase is a simple, high-level summary of where the workflow is in its lifecycle.
+  optional string phase = 1;
+
+  // Time at which this workflow started
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 2;
+
+  // Time at which this workflow completed
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 3;
+
+  // A human readable message indicating details about why the workflow is in this condition.
+  optional string message = 4;
+
+  // Compressed and base64 encoded Nodes map
+  optional string compressedNodes = 5;
+
+  // Nodes is a mapping between a node ID and the node's status.
+  map<string, NodeStatus> nodes = 6;
+
+  // PersistentVolumeClaims tracks all PVCs that were created as part of the workflow.
+  // The contents of this list are drained at the end of the workflow.
+  repeated k8s.io.api.core.v1.Volume persistentVolumeClaims = 7;
+
+  // Outputs captures output values and artifact locations produced by the workflow via global outputs
+  optional Outputs outputs = 8;
+}
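compressedNodes stores the Nodes map in a compacted textual form so a very large status does not exceed object size limits. A minimal sketch of the encode/decode shape such a field implies (gzip is an assumption here; only the compress-then-base64 intent is taken from the field comment):

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
	"io/ioutil"
)

// compress gzips the input and base64-encodes it, the string shape a
// compressedNodes-style field would carry.
func compress(raw []byte) (string, error) {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(raw); err != nil {
		return "", err
	}
	if err := zw.Close(); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
}

// decompress reverses compress: base64-decode, then gunzip.
func decompress(encoded string) ([]byte, error) {
	zipped, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return nil, err
	}
	zr, err := gzip.NewReader(bytes.NewReader(zipped))
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	return ioutil.ReadAll(zr)
}

func main() {
	enc, _ := compress([]byte(`{"node-1":{"phase":"Succeeded"}}`))
	raw, _ := decompress(enc)
	fmt.Println(string(raw)) // round-trips to the original JSON
}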
+
+// WorkflowStep is a reference to a template to execute in a series of steps
+message WorkflowStep {
+  // Name of the step
+  optional string name = 1;
+
+  // Template is the name of the template to execute as the step
+  optional string template = 2;
+
+  // Arguments hold arguments to the template
+  optional Arguments arguments = 3;
+
+  // TemplateRef is the reference to the template resource to execute as the step.
+  optional TemplateRef templateRef = 4;
+
+  // WithItems expands a step into multiple parallel steps from the items in the list
+  repeated Item withItems = 5;
+
+  // WithParam expands a step into multiple parallel steps from the value in the parameter,
+  // which is expected to be a JSON list.
+  optional string withParam = 6;
+
+  // WithSequence expands a step into a numeric sequence
+  optional Sequence withSequence = 7;
+
+  // When is an expression in which the step should conditionally execute
+  optional string when = 8;
+
+  // ContinueOn makes argo proceed with the following step even if this step fails.
+  // Errors and Failed states can be specified
+  optional ContinueOn continueOn = 9;
+}
+
+// WorkflowTemplate is the definition of a workflow template resource
+// +genclient
+// +genclient:noStatus
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WorkflowTemplate {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  optional WorkflowTemplateSpec spec = 2;
+}
+
+// WorkflowTemplateList is list of WorkflowTemplate resources
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message WorkflowTemplateList {
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  repeated WorkflowTemplate items = 2;
+}
+
+// WorkflowTemplateSpec is a spec of WorkflowTemplate.
+message WorkflowTemplateSpec {
+  // Templates is a list of workflow templates.
+  repeated Template templates = 1;
+
+  // Arguments hold arguments to the template.
+  optional Arguments arguments = 2;
+}
diff --git a/pkg/apis/workflow/v1alpha1/item.go b/pkg/apis/workflow/v1alpha1/item.go
new file mode 100644
index 000000000000..883ba701f2d0
--- /dev/null
+++ b/pkg/apis/workflow/v1alpha1/item.go
@@ -0,0 +1,176 @@
+package v1alpha1
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+// Type represents the stored type of Item.
+type Type int
+
+const (
+	Number Type = iota
+	String
+	Bool
+	Map
+	List
+)
+
+// Item expands a single workflow step into multiple parallel steps
+// The value of Item can be a map, string, bool, or number
+//
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:openapi-gen=true
+type Item struct {
+	Type    Type                 `protobuf:"varint,1,opt,name=type,casttype=Type"`
+	NumVal  json.Number          `protobuf:"bytes,2,opt,name=numVal"`
+	BoolVal bool                 `protobuf:"bytes,3,opt,name=boolVal"`
+	StrVal  string               `protobuf:"bytes,4,opt,name=strVal"`
+	MapVal  map[string]ItemValue `protobuf:"bytes,5,opt,name=mapVal"`
+	ListVal []ItemValue          `protobuf:"bytes,6,opt,name=listVal"`
+}
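The typed Item above replaces the old interface{}-backed struct so go-to-protobuf can emit a concrete message for it; UnmarshalJSON (next) sniffs the JSON token and records which variant is populated, and MarshalJSON re-emits only that variant. The intended round-trip, as exercised by item_test.go further down, looks like this:

package main

import (
	"encoding/json"
	"fmt"

	wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
)

func main() {
	// Each JSON token kind lands in a different Item variant.
	for _, src := range []string{`5`, `true`, `"hello"`, `{"os":"debian"}`, `[1,2]`} {
		var item wfv1.Item
		if err := json.Unmarshal([]byte(src), &item); err != nil {
			panic(err)
		}
		// MarshalJSON re-emits only the populated variant, so the
		// round-trip preserves the original JSON text.
		out, _ := json.Marshal(item)
		fmt.Printf("type=%v round-trip=%s\n", item.Type, out)
	}
}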
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (i *Item) UnmarshalJSON(value []byte) error {
+	strValue := string(value)
+	if _, err := strconv.Atoi(strValue); err == nil {
+		i.Type = Number
+		return json.Unmarshal(value, &i.NumVal)
+	}
+
+	if _, err := strconv.ParseFloat(strValue, 64); err == nil {
+		i.Type = Number
+		return json.Unmarshal(value, &i.NumVal)
+	}
+
+	if _, err := strconv.ParseBool(strValue); err == nil {
+		i.Type = Bool
+		return json.Unmarshal(value, &i.BoolVal)
+	}
+	if value[0] == '[' {
+		i.Type = List
+		return json.Unmarshal(value, &i.ListVal)
+	}
+	if value[0] == '{' {
+		i.Type = Map
+		return json.Unmarshal(value, &i.MapVal)
+	}
+
+	i.Type = String
+	return json.Unmarshal(value, &i.StrVal)
+}
+
+func (i *Item) String() string {
+	jsonBytes, err := json.Marshal(i)
+	if err != nil {
+		panic(err)
+	}
+	if i.Type == String {
+		// chop off the double quotes
+		return string(jsonBytes[1 : len(jsonBytes)-1])
+	}
+	return string(jsonBytes)
+}
+
+func (i Item) Format(s fmt.State, verb rune) {
+	fmt.Fprint(s, i.String())
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+func (i Item) MarshalJSON() ([]byte, error) {
+	switch i.Type {
+	case String:
+		return json.Marshal(i.StrVal)
+	case Bool:
+		return json.Marshal(i.BoolVal)
+	case Number:
+		return json.Marshal(i.NumVal)
+	case Map:
+		return json.Marshal(i.MapVal)
+	case List:
+		return json.Marshal(i.ListVal)
+	default:
+		return []byte{}, fmt.Errorf("impossible Item.Type")
+	}
+}
+
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:openapi-gen=true
+type ItemValue struct {
+	Type    Type              `protobuf:"varint,1,opt,name=type,casttype=Type"`
+	NumVal  json.Number       `protobuf:"bytes,2,opt,name=numVal"`
+	BoolVal bool              `protobuf:"bytes,3,opt,name=boolVal"`
+	StrVal  string            `protobuf:"bytes,4,opt,name=strVal"`
+	MapVal  map[string]string `protobuf:"bytes,5,opt,name=mapVal"`
+	ListVal []json.RawMessage `protobuf:"bytes,6,opt,name=listVal"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (iv *ItemValue) UnmarshalJSON(value []byte) error {
+	strValue := string(value)
+	if _, err := strconv.Atoi(strValue); err == nil {
+		iv.Type = Number
+		return json.Unmarshal(value, &iv.NumVal)
+	}
+
+	if _, err := strconv.ParseFloat(strValue, 64); err == nil {
+		iv.Type = Number
+		return json.Unmarshal(value, &iv.NumVal)
+	}
+
+	if _, err := strconv.ParseBool(strValue); err == nil {
+		iv.Type = Bool
+		return json.Unmarshal(value, &iv.BoolVal)
+	}
+	if value[0] == '[' {
+		iv.Type = List
+		return json.Unmarshal(value, &iv.ListVal)
+	}
+	if value[0] == '{' {
+		iv.Type = Map
+		return json.Unmarshal(value, &iv.MapVal)
+	}
+
+	iv.Type = String
+	return json.Unmarshal(value, &iv.StrVal)
+}
+
+func (iv *ItemValue) String() string {
+	jsonBytes, err := json.Marshal(iv)
+	if err != nil {
+		panic(err)
+	}
+	if iv.Type == String {
+		// chop off the double quotes
+		return string(jsonBytes[1 : len(jsonBytes)-1])
+	}
+	return string(jsonBytes)
+}
+
+func (iv ItemValue) Format(s fmt.State, verb rune) {
+	fmt.Fprint(s, iv.String())
+}
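Note that ItemValue.ListVal is []json.RawMessage rather than []ItemValue, which avoids a recursive message definition (the .proto above renders it as repeated bytes). json.RawMessage simply defers decoding; a tiny sketch of what that buys:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// RawMessage keeps each element as its original bytes instead of
	// decoding it, so heterogeneous lists round-trip (modulo whitespace).
	var list []json.RawMessage
	if err := json.Unmarshal([]byte(`[1, "two", {"three": 3}]`), &list); err != nil {
		panic(err)
	}
	for _, elem := range list {
		fmt.Println(string(elem))
	}
	out, _ := json.Marshal(list)
	fmt.Println(string(out)) // [1,"two",{"three":3}]
}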
+
+// MarshalJSON implements the json.Marshaller interface.
+func (iv ItemValue) MarshalJSON() ([]byte, error) {
+	switch iv.Type {
+	case String:
+		return json.Marshal(iv.StrVal)
+	case Bool:
+		return json.Marshal(iv.BoolVal)
+	case Number:
+		return json.Marshal(iv.NumVal)
+	case Map:
+		return json.Marshal(iv.MapVal)
+	case List:
+		return json.Marshal(iv.ListVal)
+	default:
+		return []byte{}, fmt.Errorf("impossible ItemValue.Type")
+	}
+}
diff --git a/pkg/apis/workflow/v1alpha1/item_test.go b/pkg/apis/workflow/v1alpha1/item_test.go
new file mode 100644
index 000000000000..842f4402c159
--- /dev/null
+++ b/pkg/apis/workflow/v1alpha1/item_test.go
@@ -0,0 +1,38 @@
+package v1alpha1
+
+import (
+	"encoding/json"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestItem(t *testing.T) {
+	testData := map[string]Type{
+		"0":                              Number,
+		"3.141":                          Number,
+		"true":                           Bool,
+		"\"hello\"":                      String,
+		"{\"val\":\"123\"}":              Map,
+		"[\"1\",\"2\",\"3\",\"4\",\"5\"]": List,
+	}
+
+	for data, expectedType := range testData {
+		var itm Item
+		err := json.Unmarshal([]byte(data), &itm)
+		assert.NoError(t, err)
+		assert.Equal(t, expectedType, itm.Type)
+		jsonBytes, err := json.Marshal(itm)
+		assert.NoError(t, err)
+		assert.Equal(t, data, string(jsonBytes))
+		if itm.Type == String {
+			assert.Equal(t, data, fmt.Sprintf("\"%v\"", itm))
+			assert.Equal(t, data, fmt.Sprintf("\"%s\"", itm))
+		} else {
+			assert.Equal(t, data, fmt.Sprintf("%v", itm))
+			assert.Equal(t, data, fmt.Sprintf("%s", itm))
+		}
+	}
+}
diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go
index 4bfb3ad9e349..5c7a2e4e8f83 100644
--- a/pkg/apis/workflow/v1alpha1/openapi_generated.go
+++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go
@@ -30,9 +30,11 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
 		"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HTTPArtifact": schema_pkg_apis_workflow_v1alpha1_HTTPArtifact(ref),
 		"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Inputs": schema_pkg_apis_workflow_v1alpha1_Inputs(ref),
 		"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Item": schema_pkg_apis_workflow_v1alpha1_Item(ref),
+		"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ItemValue": schema_pkg_apis_workflow_v1alpha1_ItemValue(ref),
 		"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Metadata": schema_pkg_apis_workflow_v1alpha1_Metadata(ref),
 		"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.NoneStrategy": schema_pkg_apis_workflow_v1alpha1_NoneStrategy(ref),
 		"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs": schema_pkg_apis_workflow_v1alpha1_Outputs(ref),
+		"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ParallelSteps": schema_pkg_apis_workflow_v1alpha1_ParallelSteps(ref),
 		"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Parameter": schema_pkg_apis_workflow_v1alpha1_Parameter(ref),
 		"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.PodGC": schema_pkg_apis_workflow_v1alpha1_PodGC(ref),
 		"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RawArtifact": schema_pkg_apis_workflow_v1alpha1_RawArtifact(ref),
@@ -563,7 +565,7 @@ func schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref common.ReferenceCallback)
 				SchemaProps: spec.SchemaProps{
 					Description: "Depth specifies clones/fetches should be shallow and include the given number of commits from the branch tip",
 					Type:        []string{"integer"},
-					Format:      "int32",
+					Format:      "int64",
 				},
 			},
 			"fetch": {
@@ -908,6 +910,70 @@ func schema_pkg_apis_workflow_v1alpha1_Item(ref common.ReferenceCallback) common
 	}
 }
 
+func schema_pkg_apis_workflow_v1alpha1_ItemValue(ref
common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "Type": { + SchemaProps: spec.SchemaProps{ + Type: []string{"integer"}, + Format: "int32", + }, + }, + "NumVal": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "BoolVal": { + SchemaProps: spec.SchemaProps{ + Type: []string{"boolean"}, + Format: "", + }, + }, + "StrVal": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "MapVal": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "ListVal": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "byte", + }, + }, + }, + }, + }, + }, + Required: []string{"Type", "NumVal", "BoolVal", "StrVal", "MapVal", "ListVal"}, + }, + }, + } +} + func schema_pkg_apis_workflow_v1alpha1_Metadata(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -1008,6 +1074,33 @@ func schema_pkg_apis_workflow_v1alpha1_Outputs(ref common.ReferenceCallback) com } } +func schema_pkg_apis_workflow_v1alpha1_ParallelSteps(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "Steps": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowStep"), + }, + }, + }, + }, + }, + }, + Required: []string{"Steps"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowStep"}, + } +} + func schema_pkg_apis_workflow_v1alpha1_Parameter(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -1680,14 +1773,7 @@ func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) co Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowStep"), - }, - }, - }, + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ParallelSteps"), }, }, }, @@ -1853,7 +1939,7 @@ func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) co }, }, Dependencies: []string{ - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactLocation", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Inputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Metadata", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ResourceTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RetryStrategy", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ScriptTemplate", 
"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.SuspendTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.TemplateRef", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.UserContainer", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowStep", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactLocation", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Inputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Metadata", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ParallelSteps", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ResourceTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RetryStrategy", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ScriptTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.SuspendTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.TemplateRef", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.UserContainer", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, } } @@ -2177,20 +2263,6 @@ func schema_pkg_apis_workflow_v1alpha1_Workflow(ref common.ReferenceCallback) co Description: "Workflow is the definition of a workflow resource", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
-						Type:   []string{"string"},
-						Format: "",
-					},
-				},
 				"metadata": {
 					SchemaProps: spec.SchemaProps{
 						Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
diff --git a/pkg/apis/workflow/v1alpha1/workflow_template_types.go b/pkg/apis/workflow/v1alpha1/workflow_template_types.go
index 61cde1b18831..7fa6e3df0afd 100644
--- a/pkg/apis/workflow/v1alpha1/workflow_template_types.go
+++ b/pkg/apis/workflow/v1alpha1/workflow_template_types.go
@@ -10,16 +10,16 @@ import (
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 type WorkflowTemplate struct {
 	metav1.TypeMeta `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-	Spec WorkflowTemplateSpec `json:"spec"`
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+	Spec WorkflowTemplateSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
 }
 
 // WorkflowTemplateList is list of WorkflowTemplate resources
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 type WorkflowTemplateList struct {
 	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata"`
-	Items []WorkflowTemplate `json:"items"`
+	metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+	Items []WorkflowTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
 
 var _ TemplateGetter = &WorkflowTemplate{}
@@ -27,9 +27,9 @@ var _ TemplateGetter = &WorkflowTemplate{}
 // WorkflowTemplateSpec is a spec of WorkflowTemplate.
 type WorkflowTemplateSpec struct {
 	// Templates is a list of workflow templates.
-	Templates []Template `json:"templates"`
+	Templates []Template `json:"templates" protobuf:"bytes,1,rep,name=templates"`
 	// Arguments hold arguments to the template.
-	Arguments Arguments `json:"arguments,omitempty"`
+	Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,2,opt,name=arguments"`
 }
 
 // GetTemplateByName retrieves a defined template by its name
diff --git a/pkg/apis/workflow/v1alpha1/workflow_types.go b/pkg/apis/workflow/v1alpha1/workflow_types.go
index e92b1fffbcf4..3b838919dc7b 100644
--- a/pkg/apis/workflow/v1alpha1/workflow_types.go
+++ b/pkg/apis/workflow/v1alpha1/workflow_types.go
@@ -4,7 +4,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"hash/fnv"
-	"strings"
 
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -81,18 +80,18 @@ type TemplateHolder interface {
 // +genclient:noStatus
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 type Workflow struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ObjectMeta `json:"metadata"`
-	Spec WorkflowSpec `json:"spec"`
-	Status WorkflowStatus `json:"status"`
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+	Spec WorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	Status WorkflowStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
 }
 
 // WorkflowList is list of Workflow resources
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 type WorkflowList struct {
 	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata"`
-	Items []Workflow `json:"items"`
+	metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
+	Items []Workflow `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
 
 var _ TemplateGetter = &Workflow{}
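The hand-written protobuf struct tags below are matched verbatim by their consumers: reflect.StructTag does an exact key/value lookup, and encoding/json and gogo/protobuf then split the value on commas, so even a stray trailing space inside a tag value (e.g. `json:",inline "` or `name=spec "`) silently changes behavior. A small standard-library demonstration:

package main

import (
	"fmt"
	"reflect"
)

type example struct {
	A string `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
}

func main() {
	f, _ := reflect.TypeOf(example{}).FieldByName("A")
	// Get returns the tag value verbatim; "name=metadata " (with a
	// trailing space) would not compare equal to "name=metadata".
	fmt.Printf("json=%q protobuf=%q\n", f.Tag.Get("json"), f.Tag.Get("protobuf"))
}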
@@ -100,56 +99,56 @@ var _ TemplateGetter = &Workflow{}
 // WorkflowSpec is the specification of a Workflow.
 type WorkflowSpec struct {
 	// Templates is a list of workflow templates used in a workflow
-	Templates []Template `json:"templates"`
+	Templates []Template `json:"templates" protobuf:"bytes,1,rep,name=templates"`
 
 	// Entrypoint is a template reference to the starting point of the workflow
-	Entrypoint string `json:"entrypoint"`
+	Entrypoint string `json:"entrypoint" protobuf:"bytes,2,opt,name=entrypoint"`
 
 	// Arguments contain the parameters and artifacts sent to the workflow entrypoint
 	// Parameters are referencable globally using the 'workflow' variable prefix.
 	// e.g. {{workflow.parameters.myparam}}
-	Arguments Arguments `json:"arguments,omitempty"`
+	Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,3,opt,name=arguments"`
 
 	// ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.
-	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+	ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,4,opt,name=serviceAccountName"`
 
 	// Volumes is a list of volumes that can be mounted by containers in a workflow.
-	Volumes []apiv1.Volume `json:"volumes,omitempty"`
+	Volumes []apiv1.Volume `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes"`
 
 	// VolumeClaimTemplates is a list of claims that containers are allowed to reference.
 	// The Workflow controller will create the claims at the beginning of the workflow
 	// and delete the claims upon completion of the workflow
-	VolumeClaimTemplates []apiv1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"`
+	VolumeClaimTemplates []apiv1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,6,rep,name=volumeClaimTemplates"`
 
 	// Parallelism limits the max total parallel pods that can execute at the same time in a workflow
-	Parallelism *int64 `json:"parallelism,omitempty"`
+	Parallelism *int64 `json:"parallelism,omitempty" protobuf:"varint,7,opt,name=parallelism"`
 
 	// ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config.
-	ArtifactRepositoryRef *ArtifactRepositoryRef `json:"artifactRepositoryRef,omitempty"`
+	ArtifactRepositoryRef *ArtifactRepositoryRef `json:"artifactRepositoryRef,omitempty" protobuf:"bytes,8,opt,name=artifactRepositoryRef"`
 
 	// Suspend will suspend the workflow and prevent execution of any future steps in the workflow
-	Suspend *bool `json:"suspend,omitempty"`
+	Suspend *bool `json:"suspend,omitempty" protobuf:"varint,9,opt,name=suspend"`
 
 	// NodeSelector is a selector which will result in all pods of the workflow
 	// to be scheduled on the selected node(s). This is able to be overridden by
 	// a nodeSelector specified in the template.
-	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,10,rep,name=nodeSelector"`
 
 	// Affinity sets the scheduling constraints for all pods in the workflow.
 	// Can be overridden by an affinity specified in the template
-	Affinity *apiv1.Affinity `json:"affinity,omitempty"`
+	Affinity *apiv1.Affinity `json:"affinity,omitempty" protobuf:"bytes,11,opt,name=affinity"`
 
 	// Tolerations to apply to workflow pods.
-	Tolerations []apiv1.Toleration `json:"tolerations,omitempty"`
+	Tolerations []apiv1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,12,rep,name=tolerations"`
 
 	// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
 	// in pods that reference this ServiceAccount.
ImagePullSecrets are distinct from Secrets because Secrets
 	// can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
 	// More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
-	ImagePullSecrets []apiv1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
+	ImagePullSecrets []apiv1.LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,13,rep,name=imagePullSecrets"`
 
 	// Host networking requested for this workflow pod. Default to false.
-	HostNetwork *bool `json:"hostNetwork,omitempty"`
+	HostNetwork *bool `json:"hostNetwork,omitempty" protobuf:"varint,14,opt,name=hostNetwork"`
 
 	// Set DNS policy for the pod.
 	// Defaults to "ClusterFirst".
@@ -157,162 +156,179 @@ type WorkflowSpec struct {
 	// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
 	// To have DNS options set along with hostNetwork, you have to specify DNS policy
 	// explicitly to 'ClusterFirstWithHostNet'.
-	DNSPolicy *apiv1.DNSPolicy `json:"dnsPolicy,omitempty"`
+	DNSPolicy *apiv1.DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,15,opt,name=dnsPolicy"`
 
 	// PodDNSConfig defines the DNS parameters of a pod in addition to
 	// those generated from DNSPolicy.
-	DNSConfig *apiv1.PodDNSConfig `json:"dnsConfig,omitempty"`
+	DNSConfig *apiv1.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,16,opt,name=dnsConfig"`
 
 	// OnExit is a template reference which is invoked at the end of the
 	// workflow, irrespective of the success, failure, or error of the
 	// primary workflow.
-	OnExit string `json:"onExit,omitempty"`
+	OnExit string `json:"onExit,omitempty" protobuf:"bytes,17,opt,name=onExit"`
 
 	// TTLSecondsAfterFinished limits the lifetime of a Workflow that has finished execution
 	// (Succeeded, Failed, Error). If this field is set, once the Workflow finishes, it will be
 	// deleted after ttlSecondsAfterFinished expires. If this field is unset,
 	// ttlSecondsAfterFinished will not expire. If this field is set to zero,
 	// ttlSecondsAfterFinished expires immediately after the Workflow finishes.
-	TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty"`
+	TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,18,opt,name=ttlSecondsAfterFinished"`
 
 	// Optional duration in seconds relative to the workflow start time which the workflow is
 	// allowed to run before the controller terminates the workflow. A value of zero is used to
 	// terminate a Running workflow
-	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
+	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,19,opt,name=activeDeadlineSeconds"`
 
 	// Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first.
-	Priority *int32 `json:"priority,omitempty"`
+	Priority *int32 `json:"priority,omitempty" protobuf:"varint,20,opt,name=priority"`
 
 	// Set scheduler name for all pods.
 	// Will be overridden if container/script template's scheduler name is set.
 	// Default scheduler will be used if neither specified.
 	// +optional
-	SchedulerName string `json:"schedulerName,omitempty"`
+	SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,21,opt,name=schedulerName"`
 
 	// PodGC describes the strategy to use when to deleting completed pods
-	PodGC *PodGC `json:"podGC,omitempty"`
+	PodGC *PodGC `json:"podGC,omitempty" protobuf:"bytes,26,opt,name=podGC"`
 
 	// PriorityClassName to apply to workflow pods.
-	PodPriorityClassName string `json:"podPriorityClassName,omitempty"`
+	PodPriorityClassName string `json:"podPriorityClassName,omitempty" protobuf:"bytes,22,opt,name=podPriorityClassName"`
 
 	// Priority to apply to workflow pods.
-	PodPriority *int32 `json:"podPriority,omitempty"`
+	PodPriority *int32 `json:"podPriority,omitempty" protobuf:"varint,23,opt,name=podPriority"`
 
 	// HostAliases is an optional list of hosts and IPs that will be injected into the pod spec
-	HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty"`
+	HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty" protobuf:"bytes,24,rep,name=hostAliases"`
 
 	// SecurityContext holds pod-level security attributes and common container settings.
 	// Optional: Defaults to empty. See type description for default values of each field.
 	// +optional
-	SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty"`
+	SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,25,opt,name=securityContext"`
+}
+
+type ParallelSteps struct {
+	Steps []WorkflowStep `protobuf:"bytes,1,rep,name=steps"`
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+// ParallelSteps deserializes from a bare JSON array of steps.
+func (p *ParallelSteps) UnmarshalJSON(value []byte) error {
+	return json.Unmarshal(value, &p.Steps)
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+// ParallelSteps serializes as a bare JSON array, hiding the wrapper struct.
+func (p *ParallelSteps) MarshalJSON() ([]byte, error) {
+	return json.Marshal(p.Steps)
+}
 
 // Template is a reusable and composable unit of execution in a workflow
 type Template struct {
 	// Name is the name of the template
-	Name string `json:"name"`
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
 
 	// Template is the name of the template which is used as the base of this template.
-	Template string `json:"template,omitempty"`
+	Template string `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
 
 	// Arguments hold arguments to the template.
-	Arguments Arguments `json:"arguments,omitempty"`
+	Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,3,opt,name=arguments"`
 
 	// TemplateRef is the reference to the template resource which is used as the base of this template.
-	TemplateRef *TemplateRef `json:"templateRef,omitempty"`
+	TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,4,opt,name=templateRef"`
 
 	// Inputs describe what inputs parameters and artifacts are supplied to this template
-	Inputs Inputs `json:"inputs,omitempty"`
+	Inputs Inputs `json:"inputs,omitempty" protobuf:"bytes,5,opt,name=inputs"`
 
 	// Outputs describe the parameters and artifacts that this template produces
-	Outputs Outputs `json:"outputs,omitempty"`
+	Outputs Outputs `json:"outputs,omitempty" protobuf:"bytes,6,opt,name=outputs"`
 
 	// NodeSelector is a selector to schedule this step of the workflow to be
 	// run on the selected node(s). Overrides the selector set at the workflow level.
-	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
 
 	// Affinity sets the pod's scheduling constraints
 	// Overrides the affinity set at the workflow level (if any)
-	Affinity *apiv1.Affinity `json:"affinity,omitempty"`
+	Affinity *apiv1.Affinity `json:"affinity,omitempty" protobuf:"bytes,8,opt,name=affinity"`
 
 	// Metdata sets the pods's metadata, i.e. annotations and labels
-	Metadata Metadata `json:"metadata,omitempty"`
+	Metadata Metadata `json:"metadata,omitempty" protobuf:"bytes,9,opt,name=metadata"`
 
 	// Deamon will allow a workflow to proceed to the next step so long as the container reaches readiness
-	Daemon *bool `json:"daemon,omitempty"`
+	Daemon *bool `json:"daemon,omitempty" protobuf:"varint,10,opt,name=daemon"`
 
 	// Steps define a series of sequential/parallel workflow steps
-	Steps [][]WorkflowStep `json:"steps,omitempty"`
+	Steps []ParallelSteps `json:"steps,omitempty" protobuf:"bytes,11,rep,name=steps"`
 
 	// Container is the main container image to run in the pod
-	Container *apiv1.Container `json:"container,omitempty"`
+	Container *apiv1.Container `json:"container,omitempty" protobuf:"bytes,12,opt,name=container"`
 
 	// Script runs a portion of code against an interpreter
-	Script *ScriptTemplate `json:"script,omitempty"`
+	Script *ScriptTemplate `json:"script,omitempty" protobuf:"bytes,13,opt,name=script"`
 
 	// Resource template subtype which can run k8s resources
-	Resource *ResourceTemplate `json:"resource,omitempty"`
+	Resource *ResourceTemplate `json:"resource,omitempty" protobuf:"bytes,14,opt,name=resource"`
 
 	// DAG template subtype which runs a DAG
-	DAG *DAGTemplate `json:"dag,omitempty"`
+	DAG *DAGTemplate `json:"dag,omitempty" protobuf:"bytes,15,opt,name=dag"`
 
 	// Suspend template subtype which can suspend a workflow when reaching the step
-	Suspend *SuspendTemplate `json:"suspend,omitempty"`
+	Suspend *SuspendTemplate `json:"suspend,omitempty" protobuf:"bytes,16,opt,name=suspend"`
 
 	// Volumes is a list of volumes that can be mounted by containers in a template.
-	Volumes []apiv1.Volume `json:"volumes,omitempty"`
+	Volumes []apiv1.Volume `json:"volumes,omitempty" protobuf:"bytes,17,rep,name=volumes"`
 
 	// InitContainers is a list of containers which run before the main container.
-	InitContainers []UserContainer `json:"initContainers,omitempty"`
+	InitContainers []UserContainer `json:"initContainers,omitempty" protobuf:"bytes,18,rep,name=initContainers"`
 
 	// Sidecars is a list of containers which run alongside the main container
 	// Sidecars are automatically killed when the main container completes
-	Sidecars []UserContainer `json:"sidecars,omitempty"`
+	Sidecars []UserContainer `json:"sidecars,omitempty" protobuf:"bytes,19,rep,name=sidecars"`
 
 	// Location in which all files related to the step will be stored (logs, artifacts, etc...).
 	// Can be overridden by individual items in Outputs. If omitted, will use the default
 	// artifact repository location configured in the controller, appended with the
 	// <workflowname>/<nodename> in the key.
-	ArchiveLocation *ArtifactLocation `json:"archiveLocation,omitempty"`
+	ArchiveLocation *ArtifactLocation `json:"archiveLocation,omitempty" protobuf:"bytes,20,opt,name=archiveLocation"`
 
 	// Optional duration in seconds relative to the StartTime that the pod may be active on a node
 	// before the system actively tries to terminate the pod; value must be positive integer
 	// This field is only applicable to container and script templates.
-	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
+	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,21,opt,name=activeDeadlineSeconds"`
 
 	// RetryStrategy describes how to retry a template when it fails
-	RetryStrategy *RetryStrategy `json:"retryStrategy,omitempty"`
+	RetryStrategy *RetryStrategy `json:"retryStrategy,omitempty" protobuf:"bytes,22,opt,name=retryStrategy"`
 
 	// Parallelism limits the max total parallel pods that can execute at the same time within the
 	// boundaries of this template invocation. If additional steps/dag templates are invoked, the
 	// pods created by those templates will not be counted towards this total.
-	Parallelism *int64 `json:"parallelism,omitempty"`
+	Parallelism *int64 `json:"parallelism,omitempty" protobuf:"varint,23,opt,name=parallelism"`
 
 	// Tolerations to apply to workflow pods.
-	Tolerations []apiv1.Toleration `json:"tolerations,omitempty"`
+	Tolerations []apiv1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,24,rep,name=tolerations"`
 
 	// If specified, the pod will be dispatched by specified scheduler.
 	// Or it will be dispatched by workflow scope scheduler if specified.
 	// If neither specified, the pod will be dispatched by default scheduler.
 	// +optional
-	SchedulerName string `json:"schedulerName,omitempty"`
+	SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,25,opt,name=schedulerName"`
 
 	// PriorityClassName to apply to workflow pods.
-	PriorityClassName string `json:"priorityClassName,omitempty"`
+	PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,26,opt,name=priorityClassName"`
 
 	// Priority to apply to workflow pods.
-	Priority *int32 `json:"priority,omitempty"`
+	Priority *int32 `json:"priority,omitempty" protobuf:"varint,27,opt,name=priority"`
 
 	// ServiceAccountName to apply to workflow pods
-	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+	ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,28,opt,name=serviceAccountName"`
 
 	// HostAliases is an optional list of hosts and IPs that will be injected into the pod spec
-	HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty"`
+	HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty" protobuf:"bytes,29,rep,name=hostAliases"`
 
 	// SecurityContext holds pod-level security attributes and common container settings.
 	// Optional: Defaults to empty. See type description for default values of each field.
 	// +optional
-	SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty"`
+	SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,30,opt,name=securityContext"`
 }
 
 var _ TemplateHolder = &Template{}
@@ -332,92 +348,92 @@ func (tmpl *Template) GetTemplateRef() *TemplateRef {
 // Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another
 type Inputs struct {
 	// Parameters are a list of parameters passed as inputs
-	Parameters []Parameter `json:"parameters,omitempty"`
+	Parameters []Parameter `json:"parameters,omitempty" protobuf:"bytes,1,rep,name=parameters"`
 
 	// Artifact are a list of artifacts passed as inputs
-	Artifacts []Artifact `json:"artifacts,omitempty"`
+	Artifacts []Artifact `json:"artifacts,omitempty" protobuf:"bytes,2,rep,name=artifacts"`
 }
 
 // Pod metdata
 type Metadata struct {
-	Annotations map[string]string `json:"annotations,omitempty"`
-	Labels map[string]string `json:"labels,omitempty"`
+	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,1,rep,name=annotations"`
+	Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,2,rep,name=labels"`
 }
 
 // Parameter indicate a passed string parameter to a service template with an optional default value
 type Parameter struct {
 	// Name is the parameter name
-	Name string `json:"name"`
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
 
 	// Default is the default value to use for an input parameter if a value was not supplied
-	Default *string `json:"default,omitempty"`
+	Default *string `json:"default,omitempty" protobuf:"bytes,2,opt,name=default"`
 
 	// Value is the literal value to use for the parameter.
 	// If specified in the context of an input parameter, the value takes precedence over any passed values
-	Value *string `json:"value,omitempty"`
+	Value *string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
 
 	// ValueFrom is the source for the output parameter's value
-	ValueFrom *ValueFrom `json:"valueFrom,omitempty"`
+	ValueFrom *ValueFrom `json:"valueFrom,omitempty" protobuf:"bytes,4,opt,name=valueFrom"`
 
 	// GlobalName exports an output parameter to the global scope, making it available as
 	// '{{workflow.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters
-	GlobalName string `json:"globalName,omitempty"`
+	GlobalName string `json:"globalName,omitempty" protobuf:"bytes,5,opt,name=globalName"`
 }
 
 // ValueFrom describes a location in which to obtain the value to a parameter
type ValueFrom struct {
 	// Path in the container to retrieve an output parameter value from in container templates
-	Path string `json:"path,omitempty"`
+	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
 
 	// JSONPath of a resource to retrieve an output parameter value from in resource templates
-	JSONPath string `json:"jsonPath,omitempty"`
+	JSONPath string `json:"jsonPath,omitempty" protobuf:"bytes,2,opt,name=jsonPath"`
 
 	// JQFilter expression against the resource object in resource templates
-	JQFilter string `json:"jqFilter,omitempty"`
+	JQFilter string `json:"jqFilter,omitempty" protobuf:"bytes,3,opt,name=jqFilter"`
 
 	// Parameter reference to a step or dag task in which to retrieve an output parameter value from
 	// (e.g.
'{{steps.mystep.outputs.myparam}}') - Parameter string `json:"parameter,omitempty"` + Parameter string `json:"parameter,omitempty" protobuf:"bytes,4,opt,name=parameter"` } // Artifact indicates an artifact to place at a specified path type Artifact struct { // name of the artifact. must be unique within a template's inputs/outputs. - Name string `json:"name"` + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Path is the container path to the artifact - Path string `json:"path,omitempty"` + Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // mode bits to use on this file, must be a value between 0 and 0777 // set when loading input artifacts. - Mode *int32 `json:"mode,omitempty"` + Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"` // From allows an artifact to reference an artifact from a previous step - From string `json:"from,omitempty"` + From string `json:"from,omitempty" protobuf:"bytes,4,opt,name=from"` // ArtifactLocation contains the location of the artifact - ArtifactLocation `json:",inline"` + ArtifactLocation `json:",inline" protobuf:"bytes,5,opt,name=artifactLocation"` // GlobalName exports an output artifact to the global scope, making it available as // '{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts - GlobalName string `json:"globalName,omitempty"` + GlobalName string `json:"globalName,omitempty" protobuf:"bytes,6,opt,name=globalName"` // Archive controls how the artifact will be saved to the artifact repository. - Archive *ArchiveStrategy `json:"archive,omitempty"` + Archive *ArchiveStrategy `json:"archive,omitempty" protobuf:"bytes,7,opt,name=archive"` // Make Artifacts optional, if Artifacts doesn't generate or exist - Optional bool `json:"optional,omitempty"` + Optional bool `json:"optional,omitempty" protobuf:"varint,8,opt,name=optional"` } // PodGC describes how to delete completed pods as they complete type PodGC struct { - Strategy PodGCStrategy `json:"strategy,omitempty"` + Strategy PodGCStrategy `json:"strategy,omitempty" protobuf:"bytes,1,opt,name=strategy,casttype=PodGCStrategy"` } // ArchiveStrategy describes how to archive files/directory when saving artifacts type ArchiveStrategy struct { - Tar *TarStrategy `json:"tar,omitempty"` - None *NoneStrategy `json:"none,omitempty"` + Tar *TarStrategy `json:"tar,omitempty" protobuf:"bytes,1,opt,name=tar"` + None *NoneStrategy `json:"none,omitempty" protobuf:"bytes,2,opt,name=none"` } // TarStrategy will tar and gzip the file or directory when saving @@ -434,74 +450,74 @@ type NoneStrategy struct{} // of a single workflow step, which the executor will use as a default location to store its files. 
type ArtifactLocation struct { // ArchiveLogs indicates if the container logs should be archived - ArchiveLogs *bool `json:"archiveLogs,omitempty"` + ArchiveLogs *bool `json:"archiveLogs,omitempty" protobuf:"varint,1,opt,name=archiveLogs"` // S3 contains S3 artifact location details - S3 *S3Artifact `json:"s3,omitempty"` + S3 *S3Artifact `json:"s3,omitempty" protobuf:"bytes,2,opt,name=s3"` // Git contains git artifact location details - Git *GitArtifact `json:"git,omitempty"` + Git *GitArtifact `json:"git,omitempty" protobuf:"bytes,3,opt,name=git"` // HTTP contains HTTP artifact location details - HTTP *HTTPArtifact `json:"http,omitempty"` + HTTP *HTTPArtifact `json:"http,omitempty" protobuf:"bytes,4,opt,name=http"` // Artifactory contains artifactory artifact location details - Artifactory *ArtifactoryArtifact `json:"artifactory,omitempty"` + Artifactory *ArtifactoryArtifact `json:"artifactory,omitempty" protobuf:"bytes,5,opt,name=artifactory"` // HDFS contains HDFS artifact location details - HDFS *HDFSArtifact `json:"hdfs,omitempty"` + HDFS *HDFSArtifact `json:"hdfs,omitempty" protobuf:"bytes,6,opt,name=hdfs"` // Raw contains raw artifact location details - Raw *RawArtifact `json:"raw,omitempty"` + Raw *RawArtifact `json:"raw,omitempty" protobuf:"bytes,7,opt,name=raw"` } type ArtifactRepositoryRef struct { - ConfigMap string `json:"configMap,omitempty"` - Key string `json:"key,omitempty"` + ConfigMap string `json:"configMap,omitempty" protobuf:"bytes,1,opt,name=configMap"` + Key string `json:"key,omitempty" protobuf:"bytes,2,opt,name=key"` } // Outputs hold parameters, artifacts, and results from a step type Outputs struct { // Parameters holds the list of output parameters produced by a step - Parameters []Parameter `json:"parameters,omitempty"` + Parameters []Parameter `json:"parameters,omitempty" protobuf:"bytes,1,rep,name=parameters"` // Artifacts holds the list of output artifacts produced by a step - Artifacts []Artifact `json:"artifacts,omitempty"` + Artifacts []Artifact `json:"artifacts,omitempty" protobuf:"bytes,2,rep,name=artifacts"` // Result holds the result (stdout) of a script template - Result *string `json:"result,omitempty"` + Result *string `json:"result,omitempty" protobuf:"bytes,3,opt,name=result"` } // WorkflowStep is a reference to a template to execute in a series of step type WorkflowStep struct { // Name of the step - Name string `json:"name,omitempty"` + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // Template is the name of the template to execute as the step - Template string `json:"template,omitempty"` + Template string `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` // Arguments hold arguments to the template - Arguments Arguments `json:"arguments,omitempty"` + Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,3,opt,name=arguments"` // TemplateRef is the reference to the template resource to execute as the step. - TemplateRef *TemplateRef `json:"templateRef,omitempty"` + TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,4,opt,name=templateRef"` // WithItems expands a step into multiple parallel steps from the items in the list - WithItems []Item `json:"withItems,omitempty"` + WithItems []Item `json:"withItems,omitempty" protobuf:"bytes,5,rep,name=withItems"` // WithParam expands a step into multiple parallel steps from the value in the parameter, // which is expected to be a JSON list. 
- WithParam string `json:"withParam,omitempty"` + WithParam string `json:"withParam,omitempty" protobuf:"bytes,6,opt,name=withParam"` // WithSequence expands a step into a numeric sequence - WithSequence *Sequence `json:"withSequence,omitempty"` + WithSequence *Sequence `json:"withSequence,omitempty" protobuf:"bytes,7,opt,name=withSequence"` // When is an expression in which the step should conditionally execute - When string `json:"when,omitempty"` + When string `json:"when,omitempty" protobuf:"bytes,8,opt,name=when"` // ContinueOn makes argo to proceed with the following step even if this step fails. // Errors and Failed states can be specified - ContinueOn *ContinueOn `json:"continueOn,omitempty"` + ContinueOn *ContinueOn `json:"continueOn,omitempty" protobuf:"bytes,9,opt,name=continueOn"` } var _ TemplateHolder = &WorkflowStep{} @@ -514,25 +530,20 @@ func (step *WorkflowStep) GetTemplateRef() *TemplateRef { return step.TemplateRef } -// Item expands a single workflow step into multiple parallel steps -// The value of Item can be a map, string, bool, or number -type Item struct { - Value interface{} `json:"value,omitempty"` -} // Sequence expands a workflow step into numeric range type Sequence struct { // Count is number of elements in the sequence (default: 0). Not to be used with end - Count string `json:"count,omitempty"` + Count string `json:"count,omitempty" protobuf:"bytes,1,opt,name=count"` // Number at which to start the sequence (default: 0) - Start string `json:"start,omitempty"` + Start string `json:"start,omitempty" protobuf:"bytes,2,opt,name=start"` // Number at which to end the sequence (default: 0). Not to be used with Count - End string `json:"end,omitempty"` + End string `json:"end,omitempty" protobuf:"bytes,3,opt,name=end"` // Format is a printf format string to format the value in the sequence - Format string `json:"format,omitempty"` + Format string `json:"format,omitempty" protobuf:"bytes,4,opt,name=format"` } // DeepCopyInto is an custom deepcopy function to deal with our use of the interface{} type @@ -547,16 +558,6 @@ func (i *Item) DeepCopyInto(out *Item) { } } -// UnmarshalJSON implements the json.Unmarshaller interface. -func (i *Item) UnmarshalJSON(value []byte) error { - return json.Unmarshal(value, &i.Value) -} - -// MarshalJSON implements the json.Marshaller interface. -func (i Item) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Value) -} - // OpenAPISchemaType is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. // See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators @@ -569,12 +570,12 @@ func (i Item) OpenAPISchemaFormat() string { return "item" } // TemplateRef is a reference of template resource. type TemplateRef struct { // Name is the resource name of the template. - Name string `json:"name,omitempty"` + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // Template is the name of referred template in the resource. - Template string `json:"template,omitempty"` + Template string `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` // RuntimeResolution skips validation at creation time. // By enabling this option, you can create the referred workflow template before the actual runtime. 
- RuntimeResolution bool `json:"runtimeResolution,omitempty"` + RuntimeResolution bool `json:"runtimeResolution,omitempty" protobuf:"varint,3,opt,name=runtimeResolution"` } type ArgumentsProvider interface { @@ -585,58 +586,58 @@ type ArgumentsProvider interface { // Arguments to a template type Arguments struct { // Parameters is the list of parameters to pass to the template or workflow - Parameters []Parameter `json:"parameters,omitempty"` + Parameters []Parameter `json:"parameters,omitempty" protobuf:"bytes,1,rep,name=parameters"` // Artifacts is the list of artifacts to pass to the template or workflow - Artifacts []Artifact `json:"artifacts,omitempty"` + Artifacts []Artifact `json:"artifacts,omitempty" protobuf:"bytes,2,rep,name=artifacts"` } var _ ArgumentsProvider = &Arguments{} // UserContainer is a container specified by a user. type UserContainer struct { - apiv1.Container `json:",inline"` + apiv1.Container `json:",inline" protobuf:"bytes,1,opt,name=container"` // MirrorVolumeMounts will mount the same volumes specified in the main container // to the container (including artifacts), at the same mountPaths. This enables // dind daemon to partially see the same filesystem as the main container in // order to use features such as docker volume binding - MirrorVolumeMounts *bool `json:"mirrorVolumeMounts,omitempty"` + MirrorVolumeMounts *bool `json:"mirrorVolumeMounts,omitempty" protobuf:"varint,2,opt,name=mirrorVolumeMounts"` } // WorkflowStatus contains overall status information about a workflow // +k8s:openapi-gen=false type WorkflowStatus struct { // Phase a simple, high-level summary of where the workflow is in its lifecycle. - Phase NodePhase `json:"phase,omitempty"` + Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NodePhase"` // Time at which this workflow started - StartedAt metav1.Time `json:"startedAt,omitempty"` + StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,2,opt,name=startedAt"` // Time at which this workflow completed - FinishedAt metav1.Time `json:"finishedAt,omitempty"` + FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,3,opt,name=finishedAt"` // A human readable message indicating details about why the workflow is in this condition. - Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` // Compressed and base64 decoded Nodes map - CompressedNodes string `json:"compressedNodes,omitempty"` + CompressedNodes string `json:"compressedNodes,omitempty" protobuf:"bytes,5,opt,name=compressedNodes"` // Nodes is a mapping between a node ID and the node's status. - Nodes map[string]NodeStatus `json:"nodes,omitempty"` + Nodes map[string]NodeStatus `json:"nodes,omitempty" protobuf:"bytes,6,rep,name=nodes"` // PersistentVolumeClaims tracks all PVCs that were created as part of the workflow. // The contents of this list are drained at the end of the workflow. 
- PersistentVolumeClaims []apiv1.Volume `json:"persistentVolumeClaims,omitempty"` + PersistentVolumeClaims []apiv1.Volume `json:"persistentVolumeClaims,omitempty" protobuf:"bytes,7,rep,name=persistentVolumeClaims"` // Outputs captures output values and artifact locations produced by the workflow via global outputs - Outputs *Outputs `json:"outputs,omitempty"` + Outputs *Outputs `json:"outputs,omitempty" protobuf:"bytes,8,opt,name=outputs"` } // RetryStrategy provides controls on how to retry a workflow step type RetryStrategy struct { // Limit is the maximum number of attempts when retrying a container - Limit *int32 `json:"limit,omitempty"` + Limit *int32 `json:"limit,omitempty" protobuf:"varint,1,opt,name=limit"` } // NodeStatus contains status information about an individual node in the workflow @@ -644,55 +645,55 @@ type RetryStrategy struct { type NodeStatus struct { // ID is a unique identifier of a node within the worklow // It is implemented as a hash of the node name, which makes the ID deterministic - ID string `json:"id"` + ID string `json:"id" protobuf:"bytes,1,opt,name=id"` // Name is unique name in the node tree used to generate the node ID - Name string `json:"name"` + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` // DisplayName is a human readable representation of the node. Unique within a template boundary - DisplayName string `json:"displayName"` + DisplayName string `json:"displayName" protobuf:"bytes,3,opt,name=displayName"` // Type indicates type of node - Type NodeType `json:"type"` + Type NodeType `json:"type" protobuf:"bytes,4,opt,name=type,casttype=NodeType"` // TemplateName is the template name which this node corresponds to. // Not applicable to virtual nodes (e.g. Retry, StepGroup) - TemplateName string `json:"templateName,omitempty"` + TemplateName string `json:"templateName,omitempty" protobuf:"bytes,5,opt,name=templateName"` // TemplateRef is the reference to the template resource which this node corresponds to. // Not applicable to virtual nodes (e.g. Retry, StepGroup) - TemplateRef *TemplateRef `json:"templateRef,omitempty"` + TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,6,opt,name=templateRef"` // Phase a simple, high-level summary of where the node is in its lifecycle. // Can be used as a state machine. - Phase NodePhase `json:"phase,omitempty"` + Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,7,opt,name=phase,casttype=NodePhase"` // BoundaryID indicates the node ID of the associated template root node in which this node belongs to - BoundaryID string `json:"boundaryID,omitempty"` + BoundaryID string `json:"boundaryID,omitempty" protobuf:"bytes,8,opt,name=boundaryID"` // A human readable message indicating details about why the node is in this condition. 
- Message string `json:"message,omitempty"` + Message string `json:"message,omitempty" protobuf:"bytes,9,opt,name=message"` // Time at which this node started - StartedAt metav1.Time `json:"startedAt,omitempty"` + StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,10,opt,name=startedAt"` // Time at which this node completed - FinishedAt metav1.Time `json:"finishedAt,omitempty"` + FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,11,opt,name=finishedAt"` // PodIP captures the IP of the pod for daemoned steps - PodIP string `json:"podIP,omitempty"` + PodIP string `json:"podIP,omitempty" protobuf:"bytes,12,opt,name=podIP"` // Daemoned tracks whether or not this node was daemoned and need to be terminated - Daemoned *bool `json:"daemoned,omitempty"` + Daemoned *bool `json:"daemoned,omitempty" protobuf:"varint,13,opt,name=daemoned"` // Inputs captures input parameter values and artifact locations supplied to this template invocation - Inputs *Inputs `json:"inputs,omitempty"` + Inputs *Inputs `json:"inputs,omitempty" protobuf:"bytes,14,opt,name=inputs"` // Outputs captures output parameter values and artifact locations produced by this template invocation - Outputs *Outputs `json:"outputs,omitempty"` + Outputs *Outputs `json:"outputs,omitempty" protobuf:"bytes,15,opt,name=outputs"` // Children is a list of child node IDs - Children []string `json:"children,omitempty"` + Children []string `json:"children,omitempty" protobuf:"bytes,16,rep,name=children"` // OutboundNodes tracks the node IDs which are considered "outbound" nodes to a template invocation. // For every invocation of a template, there are nodes which we considered as "outbound". Essentially, @@ -706,7 +707,7 @@ type NodeStatus struct { // NOTE: since templates are composable, the list of outbound nodes are carried upwards when // a DAG/steps template invokes another DAG/steps template. In other words, the outbound nodes of // a template, will be a superset of the outbound nodes of its last children. 
- OutboundNodes []string `json:"outboundNodes,omitempty"` + OutboundNodes []string `json:"outboundNodes,omitempty" protobuf:"bytes,17,rep,name=outboundNodes"` } var _ TemplateHolder = &NodeStatus{} @@ -719,9 +720,9 @@ func (n *NodeStatus) GetTemplateRef() *TemplateRef { return n.TemplateRef } -func (n NodeStatus) String() string { - return fmt.Sprintf("%s (%s)", n.Name, n.ID) -} +//func (n NodeStatus) String() string { +// return fmt.Sprintf("%s (%s)", n.Name, n.ID) +//} func isCompletedPhase(phase NodePhase) bool { return phase == NodeSucceeded || @@ -767,39 +768,39 @@ func (n NodeStatus) CanRetry() bool { // S3Bucket contains the access information required for interfacing with an S3 bucket type S3Bucket struct { // Endpoint is the hostname of the bucket endpoint - Endpoint string `json:"endpoint"` + Endpoint string `json:"endpoint" protobuf:"bytes,1,opt,name=endpoint"` // Bucket is the name of the bucket - Bucket string `json:"bucket"` + Bucket string `json:"bucket" protobuf:"bytes,2,opt,name=bucket"` // Region contains the optional bucket region - Region string `json:"region,omitempty"` + Region string `json:"region,omitempty" protobuf:"bytes,3,opt,name=region"` // Insecure will connect to the service with TLS - Insecure *bool `json:"insecure,omitempty"` + Insecure *bool `json:"insecure,omitempty" protobuf:"varint,4,opt,name=insecure"` // AccessKeySecret is the secret selector to the bucket's access key - AccessKeySecret apiv1.SecretKeySelector `json:"accessKeySecret"` + AccessKeySecret apiv1.SecretKeySelector `json:"accessKeySecret" protobuf:"bytes,5,opt,name=accessKeySecret"` // SecretKeySecret is the secret selector to the bucket's secret key - SecretKeySecret apiv1.SecretKeySelector `json:"secretKeySecret"` + SecretKeySecret apiv1.SecretKeySelector `json:"secretKeySecret" protobuf:"bytes,6,opt,name=secretKeySecret"` } // S3Artifact is the location of an S3 artifact type S3Artifact struct { - S3Bucket `json:",inline"` + S3Bucket `json:",inline" protobuf:"bytes,1,opt,name=s3Bucket"` // Key is the key in the bucket where the artifact resides - Key string `json:"key"` + Key string `json:"key" protobuf:"bytes,2,opt,name=key"` } -func (s *S3Artifact) String() string { - protocol := "https" - if s.Insecure != nil && *s.Insecure { - protocol = "http" - } - return fmt.Sprintf("%s://%s/%s/%s", protocol, s.Endpoint, s.Bucket, s.Key) -} +//func (s *S3Artifact) String() string { +// protocol := "https" +// if s.Insecure != nil && *s.Insecure { +// protocol = "http" +// } +// return fmt.Sprintf("%s://%s/%s/%s", protocol, s.Endpoint, s.Bucket, s.Key) +//} func (s *S3Artifact) HasLocation() bool { return s != nil && s.Bucket != "" @@ -808,29 +809,29 @@ func (s *S3Artifact) HasLocation() bool { // GitArtifact is the location of an git artifact type GitArtifact struct { // Repo is the git repository - Repo string `json:"repo"` + Repo string `json:"repo" protobuf:"bytes,1,opt,name=repo"` // Revision is the git commit, tag, branch to checkout - Revision string `json:"revision,omitempty"` + Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"` // Depth specifies clones/fetches should be shallow and include the given // number of commits from the branch tip - Depth *uint `json:"depth,omitempty"` + Depth *uint64 `json:"depth,omitempty" protobuf:"bytes,3,opt,name=depth"` // Fetch specifies a number of refs that should be fetched before checkout - Fetch []string `json:"fetch,omitempty"` + Fetch []string `json:"fetch,omitempty" protobuf:"bytes,4,rep,name=fetch"` // UsernameSecret 
is the secret selector to the repository username - UsernameSecret *apiv1.SecretKeySelector `json:"usernameSecret,omitempty"` + UsernameSecret *apiv1.SecretKeySelector `json:"usernameSecret,omitempty" protobuf:"bytes,5,opt,name=usernameSecret"` // PasswordSecret is the secret selector to the repository password - PasswordSecret *apiv1.SecretKeySelector `json:"passwordSecret,omitempty"` + PasswordSecret *apiv1.SecretKeySelector `json:"passwordSecret,omitempty" protobuf:"bytes,6,opt,name=passwordSecret"` // SSHPrivateKeySecret is the secret selector to the repository ssh private key - SSHPrivateKeySecret *apiv1.SecretKeySelector `json:"sshPrivateKeySecret,omitempty"` + SSHPrivateKeySecret *apiv1.SecretKeySelector `json:"sshPrivateKeySecret,omitempty" protobuf:"bytes,7,opt,name=sshPrivateKeySecret"` // InsecureIgnoreHostKey disables SSH strict host key checking during git clone - InsecureIgnoreHostKey bool `json:"insecureIgnoreHostKey,omitempty"` + InsecureIgnoreHostKey bool `json:"insecureIgnoreHostKey,omitempty" protobuf:"varint,8,opt,name=insecureIgnoreHostKey"` } func (g *GitArtifact) HasLocation() bool { @@ -840,22 +841,22 @@ func (g *GitArtifact) HasLocation() bool { // ArtifactoryAuth describes the secret selectors required for authenticating to artifactory type ArtifactoryAuth struct { // UsernameSecret is the secret selector to the repository username - UsernameSecret *apiv1.SecretKeySelector `json:"usernameSecret,omitempty"` + UsernameSecret *apiv1.SecretKeySelector `json:"usernameSecret,omitempty" protobuf:"bytes,1,opt,name=usernameSecret"` // PasswordSecret is the secret selector to the repository password - PasswordSecret *apiv1.SecretKeySelector `json:"passwordSecret,omitempty"` + PasswordSecret *apiv1.SecretKeySelector `json:"passwordSecret,omitempty" protobuf:"bytes,2,opt,name=passwordSecret"` } // ArtifactoryArtifact is the location of an artifactory artifact type ArtifactoryArtifact struct { // URL of the artifact - URL string `json:"url"` - ArtifactoryAuth `json:",inline"` + URL string `json:"url" protobuf:"bytes,1,opt,name=url"` + ArtifactoryAuth `json:",inline" protobuf:"bytes,2,opt,name=artifactoryAuth"` } -func (a *ArtifactoryArtifact) String() string { - return a.URL -} +//func (a *ArtifactoryArtifact) String() string { +// return a.URL +//} func (a *ArtifactoryArtifact) HasLocation() bool { return a != nil && a.URL != "" @@ -863,13 +864,13 @@ func (a *ArtifactoryArtifact) HasLocation() bool { // HDFSArtifact is the location of an HDFS artifact type HDFSArtifact struct { - HDFSConfig `json:",inline"` + HDFSConfig `json:",inline" protobuf:"bytes,1,opt,name=hDFSConfig"` // Path is a file path in HDFS - Path string `json:"path"` + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // Force copies a file forcibly even if it exists (default: false) - Force bool `json:"force,omitempty"` + Force bool `json:"force,omitempty" protobuf:"varint,3,opt,name=force"` } func (h *HDFSArtifact) HasLocation() bool { @@ -878,59 +879,48 @@ func (h *HDFSArtifact) HasLocation() bool { // HDFSConfig is configurations for HDFS type HDFSConfig struct { - HDFSKrbConfig `json:",inline"` + HDFSKrbConfig `json:",inline" protobuf:"bytes,1,opt,name=hDFSKrbConfig"` // Addresses is accessible addresses of HDFS name nodes - Addresses []string `json:"addresses"` + Addresses []string `json:"addresses" protobuf:"bytes,2,rep,name=addresses"` // HDFSUser is the user to access HDFS file system. // It is ignored if either ccache or keytab is used. 
- HDFSUser string `json:"hdfsUser,omitempty"` + HDFSUser string `json:"hdfsUser,omitempty" protobuf:"bytes,3,opt,name=hdfsUser"` } // HDFSKrbConfig is auth configurations for Kerberos type HDFSKrbConfig struct { // KrbCCacheSecret is the secret selector for Kerberos ccache // Either ccache or keytab can be set to use Kerberos. - KrbCCacheSecret *apiv1.SecretKeySelector `json:"krbCCacheSecret,omitempty"` + KrbCCacheSecret *apiv1.SecretKeySelector `json:"krbCCacheSecret,omitempty" protobuf:"bytes,1,opt,name=krbCCacheSecret"` // KrbKeytabSecret is the secret selector for Kerberos keytab // Either ccache or keytab can be set to use Kerberos. - KrbKeytabSecret *apiv1.SecretKeySelector `json:"krbKeytabSecret,omitempty"` + KrbKeytabSecret *apiv1.SecretKeySelector `json:"krbKeytabSecret,omitempty" protobuf:"bytes,2,opt,name=krbKeytabSecret"` // KrbUsername is the Kerberos username used with Kerberos keytab // It must be set if keytab is used. - KrbUsername string `json:"krbUsername,omitempty"` + KrbUsername string `json:"krbUsername,omitempty" protobuf:"bytes,3,opt,name=krbUsername"` // KrbRealm is the Kerberos realm used with Kerberos keytab // It must be set if keytab is used. - KrbRealm string `json:"krbRealm,omitempty"` + KrbRealm string `json:"krbRealm,omitempty" protobuf:"bytes,4,opt,name=krbRealm"` // KrbConfig is the configmap selector for Kerberos config as string // It must be set if either ccache or keytab is used. - KrbConfigConfigMap *apiv1.ConfigMapKeySelector `json:"krbConfigConfigMap,omitempty"` + KrbConfigConfigMap *apiv1.ConfigMapKeySelector `json:"krbConfigConfigMap,omitempty" protobuf:"bytes,5,opt,name=krbConfigConfigMap"` // KrbServicePrincipalName is the principal name of Kerberos service // It must be set if either ccache or keytab is used. 
- KrbServicePrincipalName string `json:"krbServicePrincipalName,omitempty"` + KrbServicePrincipalName string `json:"krbServicePrincipalName,omitempty" protobuf:"bytes,6,opt,name=krbServicePrincipalName"` } -func (a *HDFSArtifact) String() string { - var cred string - if a.HDFSUser != "" { - cred = fmt.Sprintf("HDFS user %s", a.HDFSUser) - } else if a.KrbCCacheSecret != nil { - cred = fmt.Sprintf("ccache %v", a.KrbCCacheSecret.Name) - } else if a.KrbKeytabSecret != nil { - cred = fmt.Sprintf("keytab %v (%s/%s)", a.KrbKeytabSecret.Name, a.KrbUsername, a.KrbRealm) - } - return fmt.Sprintf("hdfs://%s/%s with %s", strings.Join(a.Addresses, ", "), a.Path, cred) -} // RawArtifact allows raw string content to be placed as an artifact in a container type RawArtifact struct { // Data is the string contents of the artifact - Data string `json:"data"` + Data string `json:"data" protobuf:"bytes,1,opt,name=data"` } func (r *RawArtifact) HasLocation() bool { @@ -940,7 +930,7 @@ func (r *RawArtifact) HasLocation() bool { // HTTPArtifact allows an file served on HTTP to be placed as an input artifact in a container type HTTPArtifact struct { // URL of the artifact - URL string `json:"url"` + URL string `json:"url" protobuf:"bytes,1,opt,name=url"` } func (h *HTTPArtifact) HasLocation() bool { @@ -949,35 +939,35 @@ func (h *HTTPArtifact) HasLocation() bool { // ScriptTemplate is a template subtype to enable scripting through code steps type ScriptTemplate struct { - apiv1.Container `json:",inline"` + apiv1.Container `json:",inline" protobuf:"bytes,1,opt,name=container"` // Source contains the source code of the script to execute - Source string `json:"source"` + Source string `json:"source" protobuf:"bytes,2,opt,name=source"` } // ResourceTemplate is a template subtype to manipulate kubernetes resources type ResourceTemplate struct { // Action is the action to perform to the resource. // Must be one of: get, create, apply, delete, replace, patch - Action string `json:"action"` + Action string `json:"action" protobuf:"bytes,1,opt,name=action"` // MergeStrategy is the strategy used to merge a patch. It defaults to "strategic" // Must be one of: strategic, merge, json - MergeStrategy string `json:"mergeStrategy,omitempty"` + MergeStrategy string `json:"mergeStrategy,omitempty" protobuf:"bytes,2,opt,name=mergeStrategy"` // Manifest contains the kubernetes manifest - Manifest string `json:"manifest"` + Manifest string `json:"manifest" protobuf:"bytes,3,opt,name=manifest"` // SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource. 
- SetOwnerReference bool `json:"setOwnerReference,omitempty"` + SetOwnerReference bool `json:"setOwnerReference,omitempty" protobuf:"varint,4,opt,name=setOwnerReference"` // SuccessCondition is a label selector expression which describes the conditions // of the k8s resource in which it is acceptable to proceed to the following step - SuccessCondition string `json:"successCondition,omitempty"` + SuccessCondition string `json:"successCondition,omitempty" protobuf:"bytes,5,opt,name=successCondition"` // FailureCondition is a label selector expression which describes the conditions // of the k8s resource in which the step was considered failed - FailureCondition string `json:"failureCondition,omitempty"` + FailureCondition string `json:"failureCondition,omitempty" protobuf:"bytes,6,opt,name=failureCondition"` } // GetType returns the type of this template @@ -1024,10 +1014,10 @@ func (tmpl *Template) IsLeaf() bool { // DAGTemplate is a template subtype for directed acyclic graph templates type DAGTemplate struct { // Target are one or more names of targets to execute in a DAG - Target string `json:"target,omitempty"` + Target string `json:"target,omitempty" protobuf:"bytes,1,opt,name=target"` // Tasks are a list of DAG tasks - Tasks []DAGTask `json:"tasks"` + Tasks []DAGTask `json:"tasks" protobuf:"bytes,2,rep,name=tasks"` // This flag is for DAG logic. The DAG logic has a built-in "fail fast" feature to stop scheduling new steps, // as soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed @@ -1035,42 +1025,42 @@ type DAGTemplate struct { // The FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to // completion (either success or failure), regardless of the failed outcomes of branches in the DAG. // More info and example about this feature at https://github.com/argoproj/argo/issues/1442 - FailFast *bool `json:"failFast,omitempty"` + FailFast *bool `json:"failFast,omitempty" protobuf:"varint,3,opt,name=failFast"` } // DAGTask represents a node in the graph during DAG execution type DAGTask struct { // Name is the name of the target - Name string `json:"name"` + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` // Name of template to execute - Template string `json:"template"` + Template string `json:"template" protobuf:"bytes,2,opt,name=template"` // Arguments are the parameter and artifact arguments to the template - Arguments Arguments `json:"arguments,omitempty"` + Arguments Arguments `json:"arguments,omitempty" protobuf:"bytes,3,opt,name=arguments"` // TemplateRef is the reference to the template resource to execute. - TemplateRef *TemplateRef `json:"templateRef,omitempty"` + TemplateRef *TemplateRef `json:"templateRef,omitempty" protobuf:"bytes,4,opt,name=templateRef"` // Dependencies are name of other targets which this depends on - Dependencies []string `json:"dependencies,omitempty"` + Dependencies []string `json:"dependencies,omitempty" protobuf:"bytes,5,rep,name=dependencies"` // WithItems expands a task into multiple parallel tasks from the items in the list - WithItems []Item `json:"withItems,omitempty"` + WithItems []Item `json:"withItems,omitempty" protobuf:"bytes,6,rep,name=withItems"` // WithParam expands a task into multiple parallel tasks from the value in the parameter, // which is expected to be a JSON list. 
- WithParam string `json:"withParam,omitempty"` + WithParam string `json:"withParam,omitempty" protobuf:"bytes,7,opt,name=withParam"` // WithSequence expands a task into a numeric sequence - WithSequence *Sequence `json:"withSequence,omitempty"` + WithSequence *Sequence `json:"withSequence,omitempty" protobuf:"bytes,8,opt,name=withSequence"` // When is an expression in which the task should conditionally execute - When string `json:"when,omitempty"` + When string `json:"when,omitempty" protobuf:"bytes,9,opt,name=when"` // ContinueOn makes argo to proceed with the following step even if this step fails. // Errors and Failed states can be specified - ContinueOn *ContinueOn `json:"continueOn,omitempty"` + ContinueOn *ContinueOn `json:"continueOn,omitempty" protobuf:"bytes,10,opt,name=continueOn"` } var _ TemplateHolder = &DAGTask{} @@ -1186,9 +1176,9 @@ func (wf *Workflow) NodeID(name string) string { // It can be specified if the workflow should continue when the pod errors, fails or both. type ContinueOn struct { // +optional - Error bool `json:"error,omitempty"` + Error bool `json:"error,omitempty" protobuf:"varint,1,opt,name=error"` // +optional - Failed bool `json:"failed,omitempty"` + Failed bool `json:"failed,omitempty" protobuf:"varint,2,opt,name=failed"` } func continues(c *ContinueOn, phase NodePhase) bool { diff --git a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go index 81a01da08f82..fbfa0881342a 100644 --- a/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go @@ -5,6 +5,8 @@ package v1alpha1 import ( + json "encoding/json" + v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -295,7 +297,7 @@ func (in *GitArtifact) DeepCopyInto(out *GitArtifact) { *out = *in if in.Depth != nil { in, out := &in.Depth, &out.Depth - *out = new(uint) + *out = new(uint64) **out = **in } if in.Fetch != nil { @@ -457,6 +459,40 @@ func (in *Item) DeepCopy() *Item { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ItemValue) DeepCopyInto(out *ItemValue) { + *out = *in + if in.MapVal != nil { + in, out := &in.MapVal, &out.MapVal + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ListVal != nil { + in, out := &in.ListVal, &out.ListVal + *out = make([]json.RawMessage, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ItemValue. +func (in *ItemValue) DeepCopy() *ItemValue { + if in == nil { + return nil + } + out := new(ItemValue) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Metadata) DeepCopyInto(out *Metadata) { *out = *in @@ -586,6 +622,29 @@ func (in *Outputs) DeepCopy() *Outputs { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
in must be non-nil.
+func (in *ParallelSteps) DeepCopyInto(out *ParallelSteps) {
+	*out = *in
+	if in.Steps != nil {
+		in, out := &in.Steps, &out.Steps
+		*out = make([]WorkflowStep, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelSteps.
+func (in *ParallelSteps) DeepCopy() *ParallelSteps {
+	if in == nil {
+		return nil
+	}
+	out := new(ParallelSteps)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Parameter) DeepCopyInto(out *Parameter) {
 	*out = *in
@@ -822,15 +881,9 @@ func (in *Template) DeepCopyInto(out *Template) {
 	}
 	if in.Steps != nil {
 		in, out := &in.Steps, &out.Steps
-		*out = make([][]WorkflowStep, len(*in))
+		*out = make([]ParallelSteps, len(*in))
 		for i := range *in {
-			if (*in)[i] != nil {
-				in, out := &(*in)[i], &(*out)[i]
-				*out = make([]WorkflowStep, len(*in))
-				for i := range *in {
-					(*in)[i].DeepCopyInto(&(*out)[i])
-				}
-			}
+			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
 	if in.Container != nil {
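The two deepcopy additions above correspond to the real schema change in this patch: protobuf cannot encode a slice of slices, so the inner []WorkflowStep of a step group is wrapped in a ParallelSteps message, and the old Item{Value interface{}} union is replaced by a type tag plus one concretely typed field per variant. A minimal sketch of how the typed Item is built and consumed, using only the constructors and fields that appear in the operator and validate changes below (the full definition lives in the new pkg/apis/workflow/v1alpha1/item.go, which this excerpt does not show):

package main

import (
	"fmt"

	wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
)

func main() {
	// Scalar items carry their payload in StrVal; numbers are kept as
	// strings, exactly as expandSequence produces them.
	items := []wfv1.Item{
		{Type: wfv1.String, StrVal: "hello"},
		{Type: wfv1.Number, StrVal: "42"},
	}
	for i, item := range items {
		switch item.Type {
		case wfv1.String, wfv1.Number:
			fmt.Printf("item %d: %s\n", i, item.StrVal)
		case wfv1.Map:
			// Map items expose their keys through MapVal, the same way
			// validate.go's addItemsToScope builds the template scope.
			for k := range item.MapVal {
				fmt.Printf("item %d has key %s\n", i, k)
			}
		}
	}
}

Step groups are reached through the wrapper from now on: what used to be tmpl.Steps[0][0] becomes tmpl.Steps[0].Steps[0], as the updated tests below show.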
diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go
index 044e339b668e..79a873f3867b 100644
--- a/workflow/controller/operator.go
+++ b/workflow/controller/operator.go
@@ -1275,7 +1275,7 @@ func (woc *wfOperationCtx) initializeNode(nodeName string, nodeType wfv1.NodeTyp
 		node.Message = messages[0]
 	}
 	woc.wf.Status.Nodes[nodeID] = node
-	woc.log.Infof("%s node %s initialized %s%s", node.Type, node, node.Phase, message)
+	woc.log.Infof("%s node %v initialized %s%s", node.Type, node, node.Phase, message)
 	woc.updated = true
 	return &node
 }
@@ -1574,12 +1574,12 @@ func (woc *wfOperationCtx) processAggregateNodeOutputs(tmpl *wfv1.Template, scop
 		}
 		if node.Outputs.Result != nil {
 			// Support the case where item may be a map
-			var itemMap map[string]interface{}
+			var itemMap map[string]wfv1.ItemValue
 			err := json.Unmarshal([]byte(*node.Outputs.Result), &itemMap)
 			if err == nil {
-				resultsList = append(resultsList, wfv1.Item{Value: itemMap})
+				resultsList = append(resultsList, wfv1.Item{Type: wfv1.Map, MapVal: itemMap})
 			} else {
-				resultsList = append(resultsList, wfv1.Item{Value: *node.Outputs.Result})
+				resultsList = append(resultsList, wfv1.Item{Type: wfv1.String, StrVal: *node.Outputs.Result})
 			}
 		}
 	}
@@ -1734,38 +1734,34 @@ func (woc *wfOperationCtx) executeResource(nodeName string, tmpl *wfv1.Template,
 func processItem(fstTmpl *fasttemplate.Template, name string, index int, item wfv1.Item, obj interface{}) (string, error) {
 	replaceMap := make(map[string]string)
 	var newName string
-	switch val := item.Value.(type) {
-	case string, int, int32, int64, float32, float64, bool:
-		replaceMap["item"] = fmt.Sprintf("%v", val)
-		newName = fmt.Sprintf("%s(%d:%v)", name, index, val)
-	case map[string]interface{}:
+	switch item.Type {
+	case wfv1.String, wfv1.Number:
+		replaceMap["item"] = item.StrVal
+		newName = fmt.Sprintf("%s(%d:%v)", name, index, item.StrVal)
+	case wfv1.Bool:
+		replaceMap["item"] = fmt.Sprintf("%v", item.BoolVal)
+		newName = fmt.Sprintf("%s(%d:%v)", name, index, item.BoolVal)
+	case wfv1.Map:
 		// Handle the case when withItems is a list of maps.
 		// vals holds stringified versions of the map items which are incorporated as part of the step name.
 		// For example if the item is: {"name": "jesse","group":"developer"}
 		// the vals would be: ["name:jesse", "group:developer"]
 		// This would eventually be part of the step name (group:developer,name:jesse)
 		vals := make([]string, 0)
-		for itemKey, itemValIf := range val {
-			switch itemVal := itemValIf.(type) {
-			case string, int, int32, int64, float32, float64, bool:
-				replaceMap[fmt.Sprintf("item.%s", itemKey)] = fmt.Sprintf("%v", itemVal)
-				vals = append(vals, fmt.Sprintf("%s:%s", itemKey, itemVal))
-			default:
-				return "", errors.Errorf(errors.CodeBadRequest, "withItems[%d][%s] expected string or number. received: %v", index, itemKey, itemVal)
-			}
+		for itemKey, itemVal := range item.MapVal {
+			replaceMap[fmt.Sprintf("item.%s", itemKey)] = fmt.Sprintf("%v", itemVal)
+			vals = append(vals, fmt.Sprintf("%s:%v", itemKey, itemVal))
 		}
 		// sort the values so that the name is deterministic
 		sort.Strings(vals)
 		newName = fmt.Sprintf("%s(%d:%v)", name, index, strings.Join(vals, ","))
-	case []interface{}:
-		byteVal, err := json.Marshal(val)
+	case wfv1.List:
+		byteVal, err := json.Marshal(item.ListVal)
 		if err != nil {
 			return "", errors.InternalWrapError(err)
 		}
 		replaceMap["item"] = string(byteVal)
-		newName = fmt.Sprintf("%s(%d:%v)", name, index, val)
+		newName = fmt.Sprintf("%s(%d:%v)", name, index, item.ListVal)
 	default:
-		return "", errors.Errorf(errors.CodeBadRequest, "withItems[%d] expected string, number, list, or map. received: %s", index, val)
+		return "", errors.Errorf(errors.CodeBadRequest, "withItems[%d] expected string, number, list, or map. received: %v", index, item.Type)
 	}
 	newStepStr, err := common.Replace(fstTmpl, replaceMap, false)
 	if err != nil {
@@ -1811,11 +1807,11 @@ func expandSequence(seq *wfv1.Sequence) ([]wfv1.Item, error) {
 	}
 	if start <= end {
 		for i := start; i <= end; i++ {
-			items = append(items, wfv1.Item{Value: fmt.Sprintf(format, i)})
+			items = append(items, wfv1.Item{Type: wfv1.Number, StrVal: fmt.Sprintf(format, i)})
 		}
 	} else {
 		for i := start; i >= end; i-- {
-			items = append(items, wfv1.Item{Value: fmt.Sprintf(format, i)})
+			items = append(items, wfv1.Item{Type: wfv1.Number, StrVal: fmt.Sprintf(format, i)})
 		}
 	}
 	return items, nil
diff --git a/workflow/controller/operator_test.go b/workflow/controller/operator_test.go
index 5fedc478b0ae..b8fd9bf8deed 100644
--- a/workflow/controller/operator_test.go
+++ b/workflow/controller/operator_test.go
@@ -607,7 +607,7 @@ func TestExpandWithItems(t *testing.T) {
 	wf, err := wfcset.Create(wf)
 	assert.Nil(t, err)
 	woc := newWorkflowOperationCtx(wf, controller)
-	newSteps, err := woc.expandStep(wf.Spec.Templates[0].Steps[0][0])
+	newSteps, err := woc.expandStep(wf.Spec.Templates[0].Steps[0].Steps[0])
 	assert.Nil(t, err)
 	assert.Equal(t, 5, len(newSteps))
 	woc.operate()
@@ -655,7 +655,7 @@ func TestExpandWithItemsMap(t *testing.T) {
 	wf, err := wfcset.Create(wf)
 	assert.Nil(t, err)
 	woc := newWorkflowOperationCtx(wf, controller)
-	newSteps, err := woc.expandStep(wf.Spec.Templates[0].Steps[0][0])
+	newSteps, err := woc.expandStep(wf.Spec.Templates[0].Steps[0].Steps[0])
 	assert.Nil(t, err)
 	assert.Equal(t, 3, len(newSteps))
 }
@@ -892,8 +892,8 @@ func TestExpandWithSequence(t *testing.T) {
 	items, err = expandSequence(&seq)
 	assert.NoError(t, err)
 	assert.Equal(t, 10, len(items))
-	assert.Equal(t, "0", items[0].Value.(string))
-	assert.Equal(t, "9", items[9].Value.(string))
+	assert.Equal(t, "0", items[0].StrVal)
+	assert.Equal(t, "9", items[9].StrVal)
 
 	seq = wfv1.Sequence{
 		Start: "101",
@@ -902,8 +902,8 @@ func TestExpandWithSequence(t *testing.T) {
 	items, err = expandSequence(&seq)
 	assert.NoError(t, err)
 	assert.Equal(t, 
10, len(items)) - assert.Equal(t, "101", items[0].Value.(string)) - assert.Equal(t, "110", items[9].Value.(string)) + assert.Equal(t, "101", items[0].StrVal) + assert.Equal(t, "110", items[9].StrVal) seq = wfv1.Sequence{ Start: "50", @@ -912,8 +912,8 @@ func TestExpandWithSequence(t *testing.T) { items, err = expandSequence(&seq) assert.NoError(t, err) assert.Equal(t, 11, len(items)) - assert.Equal(t, "50", items[0].Value.(string)) - assert.Equal(t, "60", items[10].Value.(string)) + assert.Equal(t, "50", items[0].StrVal) + assert.Equal(t, "60", items[10].StrVal) seq = wfv1.Sequence{ Start: "60", @@ -922,8 +922,8 @@ func TestExpandWithSequence(t *testing.T) { items, err = expandSequence(&seq) assert.NoError(t, err) assert.Equal(t, 11, len(items)) - assert.Equal(t, "60", items[0].Value.(string)) - assert.Equal(t, "50", items[10].Value.(string)) + assert.Equal(t, "60", items[0].StrVal) + assert.Equal(t, "50", items[10].StrVal) seq = wfv1.Sequence{ Count: "0", @@ -939,7 +939,7 @@ func TestExpandWithSequence(t *testing.T) { items, err = expandSequence(&seq) assert.NoError(t, err) assert.Equal(t, 1, len(items)) - assert.Equal(t, "8", items[0].Value.(string)) + assert.Equal(t, "8", items[0].StrVal) seq = wfv1.Sequence{ Format: "testuser%02X", @@ -949,8 +949,8 @@ func TestExpandWithSequence(t *testing.T) { items, err = expandSequence(&seq) assert.NoError(t, err) assert.Equal(t, 10, len(items)) - assert.Equal(t, "testuser01", items[0].Value.(string)) - assert.Equal(t, "testuser0A", items[9].Value.(string)) + assert.Equal(t, "testuser01", items[0].StrVal) + assert.Equal(t, "testuser0A", items[9].StrVal) } var metadataTemplate = ` diff --git a/workflow/controller/steps.go b/workflow/controller/steps.go index 1c16ff2bf537..63be9bf3e6d9 100644 --- a/workflow/controller/steps.go +++ b/workflow/controller/steps.go @@ -76,7 +76,7 @@ func (woc *wfOperationCtx) executeSteps(nodeName string, tmplCtx *templateresolu } } } - sgNode = woc.executeStepGroup(stepGroup, sgNodeName, &stepsCtx) + sgNode = woc.executeStepGroup(stepGroup.Steps, sgNodeName, &stepsCtx) if !sgNode.Completed() { woc.log.Infof("Workflow step group node %v not yet completed", sgNode) return node @@ -90,7 +90,7 @@ func (woc *wfOperationCtx) executeSteps(nodeName string, tmplCtx *templateresolu } // Add all outputs of each step in the group to the scope - for _, step := range stepGroup { + for _, step := range stepGroup.Steps { childNodeName := fmt.Sprintf("%s.%s", sgNodeName, step.Name) childNode := woc.getNodeByName(childNodeName) prefix := fmt.Sprintf("steps.%s", step.Name) diff --git a/workflow/util/util.go b/workflow/util/util.go index 9982b4250591..c1aa9d2f814d 100644 --- a/workflow/util/util.go +++ b/workflow/util/util.go @@ -457,7 +457,7 @@ func FormulateResubmitWorkflow(wf *wfv1.Workflow, memoized bool) (*wfv1.Workflow // NOTE: NodeRunning shouldn't really happen except in weird scenarios where controller // mismanages state (e.g. panic when operating on a workflow) default: - return nil, errors.InternalErrorf("Workflow cannot be resubmitted with node %s in %s phase", node, node.Phase) + return nil, errors.InternalErrorf("Workflow cannot be resubmitted with node %v in %s phase", node, node.Phase) } } return &newWF, nil @@ -512,8 +512,9 @@ func RetryWorkflow(kubeClient kubernetes.Interface, wfClient v1alpha1.WorkflowIn } // do not add this status to the node. pretend as if this node never existed. 
default: + // Do not allow retry of workflows with pods in Running/Pending phase - return nil, errors.InternalErrorf("Workflow cannot be retried with node %s in %s phase", node, node.Phase) + return nil, errors.InternalErrorf("Workflow cannot be retried with node %v in %s phase", node, node.Phase) } if node.Type == wfv1.NodeTypePod { log.Infof("Deleting pod: %s", node.ID) diff --git a/workflow/validate/validate.go b/workflow/validate/validate.go index dbdb998053eb..cbbc973d8450 100644 --- a/workflow/validate/validate.go +++ b/workflow/validate/validate.go @@ -518,7 +518,7 @@ func (ctx *templateValidationCtx) validateSteps(scope map[string]interface{}, tm stepNames := make(map[string]bool) resolvedTemplates := make(map[string]*wfv1.Template) for i, stepGroup := range tmpl.Steps { - for _, step := range stepGroup { + for _, step := range stepGroup.Steps { if step.Name == "" { return errors.Errorf(errors.CodeBadRequest, "templates.%s.steps[%d].name is required", tmpl.Name, i) } @@ -553,7 +553,7 @@ func (ctx *templateValidationCtx) validateSteps(scope map[string]interface{}, tm } resolvedTemplates[step.Name] = resolvedTmpl } - for _, step := range stepGroup { + for _, step := range stepGroup.Steps { aggregate := len(step.WithItems) > 0 || step.WithParam != "" resolvedTmpl := resolvedTemplates[step.Name] ctx.addOutputsToScope(resolvedTmpl, fmt.Sprintf("steps.%s", step.Name), scope, aggregate) @@ -578,11 +578,12 @@ func addItemsToScope(prefix string, withItems []wfv1.Item, withParam string, wit } if len(withItems) > 0 { for i := range withItems { - switch val := withItems[i].Value.(type) { - case string, int, int32, int64, float32, float64, bool: + val := withItems[i] + switch val.Type { + case wfv1.String, wfv1.Number, wfv1.Bool: scope["item"] = true - case map[string]interface{}: - for itemKey := range val { + case wfv1.Map: + for itemKey := range val.MapVal { scope[fmt.Sprintf("item.%s", itemKey)] = true } default: From 910060c668d9fcb14fb59487aaf874107c715d7b Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian Date: Wed, 18 Sep 2019 22:07:38 -0700 Subject: [PATCH 002/421] added Initial ApiServer --- Gopkg.lock | 44 + cmd/client.go | 32 + cmd/main.go | 88 + cmd/server/argoserver.go | 188 + cmd/server/workflow/workflow.pb.go | 1185 ++++++ cmd/server/workflow/workflow.pb.gw.go | 204 + cmd/server/workflow/workflow.proto | 47 + cmd/server/workflow/workflow.swagger.json | 3777 ++++++++++++++++++ cmd/server/workflow/workflow_service.go | 67 + cmd/server/workflow/workflow_service_test.go | 176 + hack/generate-proto.sh | 160 +- pkg/apiclient/apiclient.go | 10 + util/json/json.go | 90 + 13 files changed, 5988 insertions(+), 80 deletions(-) create mode 100644 cmd/client.go create mode 100644 cmd/main.go create mode 100644 cmd/server/argoserver.go create mode 100644 cmd/server/workflow/workflow.pb.go create mode 100644 cmd/server/workflow/workflow.pb.gw.go create mode 100644 cmd/server/workflow/workflow.proto create mode 100644 cmd/server/workflow/workflow.swagger.json create mode 100644 cmd/server/workflow/workflow_service.go create mode 100644 cmd/server/workflow/workflow_service_test.go create mode 100644 pkg/apiclient/apiclient.go create mode 100644 util/json/json.go diff --git a/Gopkg.lock b/Gopkg.lock index e3bd5691f5e9..cdcd5eeda3bc 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -55,6 +55,25 @@ pruneopts = "" revision = "de5bf2ad457846296e2031421a34e2568e304e35" +[[projects]] + branch = "master" + digest = "1:a7f619ffc7b99687f9444bd0a07509fec8ae708a7175d234878129896c0918a8" + name = 
"github.com/alecthomas/template" + packages = [ + ".", + "parse", + ] + pruneopts = "" + revision = "fb15b899a75114aa79cc930e33c46b577cc664b1" + +[[projects]] + branch = "master" + digest = "1:4727e371347b0e2e983bb402ecf5facb720ffe4496cea4c3cad4a97dc1d32772" + name = "github.com/alecthomas/units" + packages = ["."] + pruneopts = "" + revision = "680d30ca31172657fa50e996eb82d790d1d8b96e" + [[projects]] branch = "master" digest = "1:992caf139336e7efda99ca252fe6f26d3b588ab7e4cd041534f7a4501009748e" @@ -233,7 +252,9 @@ digest = "1:fd53b471edb4c28c7d297f617f4da0d33402755f58d6301e7ca1197ef0a90937" name = "github.com/gogo/protobuf" packages = [ + "gogoproto", "proto", + "protoc-gen-gogo/descriptor", "sortkeys", ] pruneopts = "" @@ -499,6 +520,7 @@ packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", + "log", "model", ] pruneopts = "" @@ -734,6 +756,8 @@ "cpu", "unix", "windows", + "windows/registry", + "windows/svc/eventlog", ] pruneopts = "" revision = "51ab0e2deafac1f46c46ad59cf0921be2f180c3d" @@ -826,6 +850,7 @@ digest = "1:95b0a53d4d31736b2483a8c41667b2bd83f303706106f81bd2f54e3f9c24eaf4" name = "google.golang.org/genproto" packages = [ + "googleapis/api/annotations", "googleapis/api/httpbody", "googleapis/rpc/status", "protobuf/field_mask", @@ -875,6 +900,14 @@ revision = "045159ad57f3781d409358e3ade910a018c16b30" version = "v1.22.1" +[[projects]] + digest = "1:15d017551627c8bb091bde628215b2861bed128855343fdd570c62d08871f6e1" + name = "gopkg.in/alecthomas/kingpin.v2" + packages = ["."] + pruneopts = "" + revision = "947dcec5ba9c011838740e680966fd7087a71d0d" + version = "v2.2.6" + [[projects]] digest = "1:75fb3fcfc73a8c723efde7777b40e8e8ff9babf30d8c56160d01beffea8a95a6" name = "gopkg.in/inf.v0" @@ -1381,13 +1414,18 @@ "github.com/evanphx/json-patch", "github.com/ghodss/yaml", "github.com/go-openapi/spec", + "github.com/gogo/protobuf/gogoproto", "github.com/gogo/protobuf/proto", "github.com/gogo/protobuf/sortkeys", + "github.com/golang/protobuf/proto", "github.com/gorilla/websocket", + "github.com/grpc-ecosystem/grpc-gateway/runtime", + "github.com/grpc-ecosystem/grpc-gateway/utilities", "github.com/mitchellh/go-ps", "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/prometheus/client_golang/prometheus/promhttp", + "github.com/prometheus/common/log", "github.com/sirupsen/logrus", "github.com/spf13/cobra", "github.com/stretchr/testify/assert", @@ -1396,6 +1434,12 @@ "github.com/tidwall/gjson", "github.com/valyala/fasttemplate", "golang.org/x/crypto/ssh", + "golang.org/x/net/context", + "google.golang.org/genproto/googleapis/api/annotations", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/grpclog", + "google.golang.org/grpc/status", "gopkg.in/jcmturner/gokrb5.v5/client", "gopkg.in/jcmturner/gokrb5.v5/config", "gopkg.in/jcmturner/gokrb5.v5/credentials", diff --git a/cmd/client.go b/cmd/client.go new file mode 100644 index 000000000000..b688bbfd73ea --- /dev/null +++ b/cmd/client.go @@ -0,0 +1,32 @@ +package main + +import ( + "context" + "fmt" + "github.com/argoproj/argo/cmd/server/workflow" + "google.golang.org/grpc" +) + +func main(){ + + conn, err := grpc.Dial("localhost:8082", grpc.WithInsecure()) + if err != nil { + fmt.Println(err) + } + defer conn.Close() + name := "scripts-bash-5ksp4" + query := workflow.WorkflowQuery{Name: name,} + client := workflow.NewWorkflowServiceClient(conn) + wflist, err :=client.List(context.TODO(),&query) + if err !=nil { + fmt.Println("errr",err) + } + + //byte1, err := 
diff --git a/cmd/main.go b/cmd/main.go
new file mode 100644
index 000000000000..c835840a7215
--- /dev/null
+++ b/cmd/main.go
@@ -0,0 +1,88 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/argoproj/argo/cmd/server"
+	wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned"
+	cmdutil "github.com/argoproj/argo/util/cmd"
+	"github.com/argoproj/pkg/cli"
+	kubecli "github.com/argoproj/pkg/kube/cli"
+	"github.com/argoproj/pkg/stats"
+	"github.com/spf13/cobra"
+	"golang.org/x/net/context"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+const (
+	// CLIName is the name of the CLI
+	CLIName = "argo-api-server"
+)
+
+// NewRootCommand returns a new instance of the api server main entrypoint
+func NewRootCommand() *cobra.Command {
+	var (
+		clientConfig clientcmd.ClientConfig
+		logLevel     string // --loglevel
+	)
+
+	var command = cobra.Command{
+		Use:   CLIName,
+		Short: "Argo api server",
+		RunE: func(c *cobra.Command, args []string) error {
+			cli.SetLogLevel(logLevel)
+			stats.RegisterStackDumper()
+			stats.StartStatsTicker(5 * time.Minute)
+
+			config, err := clientConfig.ClientConfig()
+			if err != nil {
+				return err
+			}
+			config.Burst = 30
+			config.QPS = 20.0
+
+			namespace, _, err := clientConfig.Namespace()
+			if err != nil {
+				return err
+			}
+
+			//kubeclientset := kubernetes.NewForConfigOrDie(config)
+			wfClientset := wfclientset.NewForConfigOrDie(config)
+
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
+			var opts = server.ArgoServerOpts{Namespace: namespace, KubeClientset: wfClientset}
+			argoSvr := server.NewArgoServer(ctx, opts)
+			go argoSvr.Run(ctx, 8082)
+
+			// Wait forever
+			select {}
+		},
+	}
+
+	clientConfig = kubecli.AddKubectlFlagsToCmd(&command)
+	command.AddCommand(cmdutil.NewVersionCmd(CLIName))
+	//command.Flags().StringVar(&configMap, "configmap", "workflow-controller-configmap", "Name of K8s configmap to retrieve workflow controller configuration")
+	//command.Flags().StringVar(&executorImage, "executor-image", "", "Executor image to use (overrides value in configmap)")
+	//command.Flags().StringVar(&executorImagePullPolicy, "executor-image-pull-policy", "", "Executor imagePullPolicy to use (overrides value in configmap)")
+	command.Flags().StringVar(&logLevel, "loglevel", "debug", "Set the logging level. One of: debug|info|warn|error")
+	//command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
+	//command.Flags().IntVar(&workflowWorkers, "workflow-workers", 8, "Number of workflow workers")
+	//command.Flags().IntVar(&podWorkers, "pod-workers", 8, "Number of pod workers")
+	return &command
+}
+
+func main() {
+	if err := NewRootCommand().Execute(); err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
\ No newline at end of file
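main.go starts only the gRPC listener; the HTTP/JSON path is still commented out in argoserver.go below. A sketch of how that grpc-gateway proxy would be switched on, assuming the generated RegisterWorkflowServiceHandlerFromEndpoint from workflow.pb.gw.go (listed in this patch's diffstat) and a hypothetical :8081 HTTP port:

package server

import (
	"net/http"

	"github.com/argoproj/argo/cmd/server/workflow"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// serveGateway proxies HTTP/JSON requests under /api/ to the gRPC
// WorkflowService listening at grpcEndpoint. The :8081 port is an
// illustrative choice, not something this patch configures.
func serveGateway(ctx context.Context, grpcEndpoint string) error {
	gwmux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := workflow.RegisterWorkflowServiceHandlerFromEndpoint(ctx, gwmux, grpcEndpoint, opts); err != nil {
		return err
	}
	mux := http.NewServeMux()
	mux.Handle("/api/", gwmux)
	return http.ListenAndServe(":8081", mux)
}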
One of: debug|info|warn|error") + //command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level") + //command.Flags().IntVar(&workflowWorkers, "workflow-workers", 8, "Number of workflow workers") + //command.Flags().IntVar(&podWorkers, "pod-workers", 8, "Number of pod workers") + return &command +} + +func main() { + if err := NewRootCommand().Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} \ No newline at end of file diff --git a/cmd/server/argoserver.go b/cmd/server/argoserver.go new file mode 100644 index 000000000000..587c7e43e713 --- /dev/null +++ b/cmd/server/argoserver.go @@ -0,0 +1,188 @@ +package server + +import ( + "github.com/argoproj/argo/cmd/server/workflow" + "github.com/argoproj/argo/pkg/apiclient" + "github.com/argoproj/argo/pkg/client/clientset/versioned" + golang_proto "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/prometheus/common/log" + "golang.org/x/net/context" + "google.golang.org/grpc" + "k8s.io/apimachinery/pkg/util/wait" + "net" + "regexp" + + "fmt" + "k8s.io/client-go/kubernetes" + //"net" + "net/http" + "time" +) + +type ArgoServer struct { + Namespace string + KubeClientset kubernetes.Clientset + wfClientSet *versioned.Clientset +} + +type ArgoServerOpts struct { + Insecure bool + Namespace string + KubeClientset *versioned.Clientset +} + +func NewArgoServer(ctx context.Context, opts ArgoServerOpts) *ArgoServer{ + + return &ArgoServer{Namespace: opts.Namespace, wfClientSet: opts.KubeClientset } +} + +var backoff = wait.Backoff{ + Steps: 5, + Duration: 500 * time.Millisecond, + Factor: 1.0, + Jitter: 0.1, +} +func (as *ArgoServer)Run(ctx context.Context, port int){ + grpcs := as.newGRPCServer() + //grpcWebS := grpcweb.WrapServer(grpcs) + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", 8082)) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + grpcs.Serve(lis) + + //// Start listener + //var realErr error + //_ = wait.ExponentialBackoff(backoff, func() (bool, error) { + // http.ListenAndServe(":8082", grpcs) + // if realErr != nil { + // log.Warnf("failed listen: %v", realErr) + // return false, nil + // } + // return true, nil + //}) + //errors.CheckError(realErr) +} + +func (as *ArgoServer) newGRPCServer() *grpc.Server { + sOpts := []grpc.ServerOption{ + // Set the both send and receive the bytes limit to be 100MB + // The proper way to achieve high performance is to have pagination + // while we work toward that, we can have high limit first + grpc.MaxRecvMsgSize(apiclient.MaxGRPCMessageSize), + grpc.MaxSendMsgSize(apiclient.MaxGRPCMessageSize), + grpc.ConnectionTimeout(300 * time.Second), + } + + grpcS := grpc.NewServer(sOpts...) + workflowService := workflow.NewServer(as.Namespace, *as.wfClientSet) + workflow.RegisterWorkflowServiceServer(grpcS, workflowService) + return grpcS +} + +//// newHTTPServer returns the HTTP server to serve HTTP/HTTPS requests. This is implemented +//// using grpc-gateway as a proxy to the gRPC server. 
+//func (a *ArgoServer) newHTTPServer(ctx context.Context, port int, grpcWebHandler http.Handler) *http.Server { +// endpoint := fmt.Sprintf("localhost:%d", port) +// mux := http.NewServeMux() +// httpS := http.Server{ +// Addr: endpoint, +// Handler: &handlerSwitcher{ +// handler: &bug21955Workaround{handler: mux}, +// contentTypeToHandler: map[string]http.Handler{ +// "application/grpc-web+proto": grpcWebHandler, +// }, +// }, +// } +// var dOpts []grpc.DialOption +// dOpts = append(dOpts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(apiclient.MaxGRPCMessageSize))) +// //dOpts = append(dOpts, grpc.WithUserAgent(fmt.Sprintf("%s/%s", common.ArgoCDUserAgentName, argocd.GetVersion().Version))) +// +// dOpts = append(dOpts, grpc.WithInsecure()) +// +// // HTTP 1.1+JSON Server +// // grpc-ecosystem/grpc-gateway is used to proxy HTTP requests to the corresponding gRPC call +// // NOTE: if a marshaller option is not supplied, grpc-gateway will default to the jsonpb from +// // golang/protobuf. Which does not support types such as time.Time. gogo/protobuf does support +// // time.Time, but does not support custom UnmarshalJSON() and MarshalJSON() methods. Therefore +// //// we use our own Marshaler +// gwMuxOpts := runtime.WithMarshalerOption(runtime.MIMEWildcard, new(jsonutil.JSONMarshaler)) +// gwCookieOpts := runtime.WithForwardResponseOption(a.translateGrpcCookieHeader) +// gwmux := runtime.NewServeMux(gwMuxOpts, gwCookieOpts) +// mux.Handle("/api/", gwmux) +// mustRegisterGWHandler(workflow.RegisterWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dOpts) +// +// return &httpS +//} +type registerFunc func(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) error + +// mustRegisterGWHandler is a convenience function to register a gateway handler +func mustRegisterGWHandler(register registerFunc, ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) { + err := register(ctx, mux, endpoint, opts) + if err != nil { + panic(err) + } +} + +type handlerSwitcher struct { + handler http.Handler + contentTypeToHandler map[string]http.Handler +} + +func (s *handlerSwitcher) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if contentHandler, ok := s.contentTypeToHandler[r.Header.Get("content-type")]; ok { + contentHandler.ServeHTTP(w, r) + } else { + s.handler.ServeHTTP(w, r) + } +} + +// Workaround for https://github.com/golang/go/issues/21955 to support escaped URLs in URL path. 
+// Workaround for https://github.com/golang/go/issues/21955 to support escaped URLs in URL path.
+type bug21955Workaround struct {
+	handler http.Handler
+}
+
+var pathPatterns = []*regexp.Regexp{
+	regexp.MustCompile(`/api/v1/clusters/[^/]+`),
+	regexp.MustCompile(`/api/v1/repositories/[^/]+`),
+	regexp.MustCompile(`/api/v1/repositories/[^/]+/apps`),
+	regexp.MustCompile(`/api/v1/repositories/[^/]+/apps/[^/]+`),
+}
+
+func (bf *bug21955Workaround) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	for _, pattern := range pathPatterns {
+		if pattern.MatchString(r.URL.RawPath) {
+			r.URL.Path = r.URL.RawPath
+			break
+		}
+	}
+	bf.handler.ServeHTTP(w, r)
+}
+
+// bug21955WorkaroundInterceptor is currently a pass-through; it is kept as a hook
+// for the workaround above.
+func bug21955WorkaroundInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
+	return handler(ctx, req)
+}
+
+// newRedirectServer returns an HTTP server which does a 307 redirect to the HTTPS server.
+func newRedirectServer(port int) *http.Server {
+	return &http.Server{
+		Addr: fmt.Sprintf("localhost:%d", port),
+		Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			target := "https://" + req.Host + req.URL.Path
+			if len(req.URL.RawQuery) > 0 {
+				target += "?" + req.URL.RawQuery
+			}
+			http.Redirect(w, req, target, http.StatusTemporaryRedirect)
+		}),
+	}
+}
+
+// translateGrpcCookieHeader conditionally sets a cookie on the response; currently a no-op stub.
+func (a *ArgoServer) translateGrpcCookieHeader(ctx context.Context, w http.ResponseWriter, resp golang_proto.Message) error {
+	return nil
+}
diff --git a/cmd/server/workflow/workflow.pb.go b/cmd/server/workflow/workflow.pb.go
new file mode 100644
index 000000000000..94be449d1b0c
--- /dev/null
+++ b/cmd/server/workflow/workflow.pb.go
@@ -0,0 +1,1185 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: cmd/server/workflow/workflow.proto

+// Workflow Service
+//
+// Workflow Service API performs CRUD actions against application resources

+package workflow

+import (
+	context "context"
+	fmt "fmt"
+	v1alpha1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	grpc "google.golang.org/grpc"
+	io "io"
+	math "math"
+)

+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf

+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type WorkflowCreateResponse struct { + Response string `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowCreateResponse) Reset() { *m = WorkflowCreateResponse{} } +func (m *WorkflowCreateResponse) String() string { return proto.CompactTextString(m) } +func (*WorkflowCreateResponse) ProtoMessage() {} +func (*WorkflowCreateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_192bc67c39cca05a, []int{0} +} +func (m *WorkflowCreateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowCreateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkflowCreateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkflowCreateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowCreateResponse.Merge(m, src) +} +func (m *WorkflowCreateResponse) XXX_Size() int { + return m.Size() +} +func (m *WorkflowCreateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowCreateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowCreateResponse proto.InternalMessageInfo + +func (m *WorkflowCreateResponse) GetResponse() string { + if m != nil { + return m.Response + } + return "" +} + +type WorkflowListResponse struct { + Workflows []*v1alpha1.Workflow `protobuf:"bytes,1,rep,name=workflows,proto3" json:"workflows,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowListResponse) Reset() { *m = WorkflowListResponse{} } +func (m *WorkflowListResponse) String() string { return proto.CompactTextString(m) } +func (*WorkflowListResponse) ProtoMessage() {} +func (*WorkflowListResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_192bc67c39cca05a, []int{1} +} +func (m *WorkflowListResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkflowListResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkflowListResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowListResponse.Merge(m, src) +} +func (m *WorkflowListResponse) XXX_Size() int { + return m.Size() +} +func (m *WorkflowListResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowListResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowListResponse proto.InternalMessageInfo + +func (m *WorkflowListResponse) GetWorkflows() []*v1alpha1.Workflow { + if m != nil { + return m.Workflows + } + return nil +} + +type WorkflowResponse struct { + Workflows *v1alpha1.Workflow `protobuf:"bytes,1,opt,name=workflows,proto3" json:"workflows,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowResponse) Reset() { *m = WorkflowResponse{} } +func (m *WorkflowResponse) String() string { return proto.CompactTextString(m) } +func (*WorkflowResponse) ProtoMessage() {} +func (*WorkflowResponse) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_192bc67c39cca05a, []int{2} +} +func (m *WorkflowResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkflowResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkflowResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowResponse.Merge(m, src) +} +func (m *WorkflowResponse) XXX_Size() int { + return m.Size() +} +func (m *WorkflowResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowResponse proto.InternalMessageInfo + +func (m *WorkflowResponse) GetWorkflows() *v1alpha1.Workflow { + if m != nil { + return m.Workflows + } + return nil +} + +type WorkflowQuery struct { + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` + StartIdx int32 `protobuf:"varint,3,opt,name=StartIdx,proto3" json:"StartIdx,omitempty"` + PageSize int32 `protobuf:"varint,4,opt,name=PageSize,proto3" json:"PageSize,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowQuery) Reset() { *m = WorkflowQuery{} } +func (m *WorkflowQuery) String() string { return proto.CompactTextString(m) } +func (*WorkflowQuery) ProtoMessage() {} +func (*WorkflowQuery) Descriptor() ([]byte, []int) { + return fileDescriptor_192bc67c39cca05a, []int{3} +} +func (m *WorkflowQuery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkflowQuery.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkflowQuery) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowQuery.Merge(m, src) +} +func (m *WorkflowQuery) XXX_Size() int { + return m.Size() +} +func (m *WorkflowQuery) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowQuery.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowQuery proto.InternalMessageInfo + +func (m *WorkflowQuery) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *WorkflowQuery) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *WorkflowQuery) GetStartIdx() int32 { + if m != nil { + return m.StartIdx + } + return 0 +} + +func (m *WorkflowQuery) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func init() { + proto.RegisterType((*WorkflowCreateResponse)(nil), "workflow.WorkflowCreateResponse") + proto.RegisterType((*WorkflowListResponse)(nil), "workflow.WorkflowListResponse") + proto.RegisterType((*WorkflowResponse)(nil), "workflow.WorkflowResponse") + proto.RegisterType((*WorkflowQuery)(nil), "workflow.WorkflowQuery") +} + +func init() { proto.RegisterFile("cmd/server/workflow/workflow.proto", fileDescriptor_192bc67c39cca05a) } + +var fileDescriptor_192bc67c39cca05a = []byte{ + // 438 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xcd, 0x8e, 0xd3, 0x30, + 0x10, 0x96, 0xdb, 0xb2, 0xda, 0x1a, 0x21, 0x16, 0xb3, 0x40, 0x08, 0xab, 0x28, 
0xca, 0xa9, 0x42, + 0xc8, 0x56, 0x17, 0x4e, 0x2b, 0x71, 0x61, 0x0f, 0x08, 0x09, 0x21, 0x48, 0x91, 0x90, 0x96, 0x93, + 0x37, 0x1d, 0xbc, 0xa1, 0x6d, 0x1c, 0xd9, 0xde, 0x2c, 0xe5, 0xc8, 0x81, 0x17, 0xe0, 0xa5, 0x38, + 0x70, 0x40, 0xe2, 0x05, 0x50, 0xc5, 0x83, 0xa0, 0x38, 0xb5, 0x83, 0x48, 0xcb, 0x85, 0x3d, 0xe5, + 0x9b, 0xf9, 0x32, 0xdf, 0x8c, 0xe7, 0x07, 0x27, 0xd9, 0x62, 0xca, 0x34, 0xa8, 0x0a, 0x14, 0xbb, + 0x90, 0x6a, 0xf6, 0x6e, 0x2e, 0x2f, 0x3c, 0xa0, 0xa5, 0x92, 0x46, 0x92, 0x5d, 0x67, 0x87, 0xfb, + 0x42, 0x0a, 0x69, 0x9d, 0xac, 0x46, 0x0d, 0x1f, 0x1e, 0x08, 0x29, 0xc5, 0x1c, 0x18, 0x2f, 0x73, + 0xc6, 0x8b, 0x42, 0x1a, 0x6e, 0x72, 0x59, 0xe8, 0x35, 0x7b, 0x2c, 0x72, 0x73, 0x76, 0x7e, 0x4a, + 0x33, 0xb9, 0x60, 0x5c, 0xd9, 0xf0, 0xf7, 0x16, 0xb0, 0x72, 0x26, 0xea, 0x18, 0xdd, 0x26, 0xae, + 0xc6, 0x7c, 0x5e, 0x9e, 0xf1, 0x31, 0x13, 0x50, 0x80, 0xe2, 0x06, 0xa6, 0x8d, 0x48, 0xf2, 0x08, + 0xdf, 0x7e, 0xb3, 0xfe, 0xe9, 0x58, 0x01, 0x37, 0x90, 0x82, 0x2e, 0x65, 0xa1, 0x81, 0x84, 0x78, + 0x57, 0xad, 0x71, 0x80, 0x62, 0x34, 0x1a, 0xa6, 0xde, 0x4e, 0x34, 0xde, 0x77, 0x51, 0xcf, 0x73, + 0x6d, 0x7c, 0xcc, 0x5b, 0x3c, 0x74, 0x29, 0x75, 0x80, 0xe2, 0xfe, 0xe8, 0xea, 0xe1, 0x63, 0xda, + 0x96, 0x49, 0x5d, 0x99, 0x16, 0xd0, 0x72, 0x26, 0x68, 0x5d, 0x26, 0xf5, 0x6d, 0x71, 0x65, 0x52, + 0xa7, 0x9e, 0xb6, 0x7a, 0x89, 0xc4, 0x7b, 0xde, 0xbd, 0x25, 0x21, 0xba, 0xd4, 0x84, 0x4b, 0x7c, + 0xcd, 0xb9, 0x5f, 0x9d, 0x83, 0x5a, 0x12, 0x82, 0x07, 0x2f, 0xf8, 0xc2, 0xb5, 0xc3, 0x62, 0x72, + 0x80, 0x87, 0xf5, 0x57, 0x97, 0x3c, 0x83, 0xa0, 0x67, 0x89, 0xd6, 0x51, 0x37, 0x71, 0x62, 0xb8, + 0x32, 0xcf, 0xa6, 0x1f, 0x82, 0x7e, 0x8c, 0x46, 0x57, 0x52, 0x6f, 0xd7, 0xdc, 0x4b, 0x2e, 0x60, + 0x92, 0x7f, 0x84, 0x60, 0xd0, 0x70, 0xce, 0x3e, 0xfc, 0xd6, 0xc3, 0xd7, 0x5d, 0xee, 0x09, 0xa8, + 0x2a, 0xcf, 0x80, 0x7c, 0x46, 0x78, 0xa7, 0x99, 0x11, 0xf9, 0xbf, 0x37, 0x86, 0x71, 0x4b, 0x6e, + 0x1e, 0x7e, 0x72, 0xef, 0xd3, 0x8f, 0x5f, 0x5f, 0x7a, 0xb7, 0x92, 0x3d, 0xbb, 0x7b, 0xd5, 0xd8, + 0x6f, 0xd2, 0x11, 0xba, 0x4f, 0x4e, 0xf0, 0xa0, 0x9e, 0x3a, 0xb9, 0xd3, 0x95, 0xb1, 0x7d, 0x0a, + 0xa3, 0x2e, 0xf1, 0xe7, 0x9a, 0x24, 0x77, 0xad, 0xfa, 0x4d, 0x72, 0xe3, 0x6f, 0x75, 0x4d, 0x5e, + 0xe3, 0xfe, 0x53, 0xf8, 0x87, 0x74, 0xd8, 0x25, 0xbc, 0x6c, 0x60, 0x65, 0x09, 0xe9, 0x14, 0xfd, + 0xe4, 0xe8, 0xeb, 0x2a, 0x42, 0xdf, 0x57, 0x11, 0xfa, 0xb9, 0x8a, 0xd0, 0xc9, 0x83, 0xad, 0x87, + 0xb3, 0xe1, 0x66, 0x4f, 0x77, 0xec, 0xa1, 0x3c, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x5b, 0x91, + 0x47, 0xab, 0xd1, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// WorkflowServiceClient is the client API for WorkflowService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type WorkflowServiceClient interface { + Create(ctx context.Context, in *v1alpha1.Workflow, opts ...grpc.CallOption) (*WorkflowCreateResponse, error) + List(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowListResponse, error) + Get(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowResponse, error) +} + +type workflowServiceClient struct { + cc *grpc.ClientConn +} + +func NewWorkflowServiceClient(cc *grpc.ClientConn) WorkflowServiceClient { + return &workflowServiceClient{cc} +} + +func (c *workflowServiceClient) Create(ctx context.Context, in *v1alpha1.Workflow, opts ...grpc.CallOption) (*WorkflowCreateResponse, error) { + out := new(WorkflowCreateResponse) + err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) List(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowListResponse, error) { + out := new(WorkflowListResponse) + err := c.cc.Invoke(ctx, "/workflow.WorkflowService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) Get(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowResponse, error) { + out := new(WorkflowResponse) + err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// WorkflowServiceServer is the server API for WorkflowService service. +type WorkflowServiceServer interface { + Create(context.Context, *v1alpha1.Workflow) (*WorkflowCreateResponse, error) + List(context.Context, *WorkflowQuery) (*WorkflowListResponse, error) + Get(context.Context, *WorkflowQuery) (*WorkflowResponse, error) +} + +func RegisterWorkflowServiceServer(s *grpc.Server, srv WorkflowServiceServer) { + s.RegisterService(&_WorkflowService_serviceDesc, srv) +} + +func _WorkflowService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1alpha1.Workflow) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflow.WorkflowService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).Create(ctx, req.(*v1alpha1.Workflow)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflow.WorkflowService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).List(ctx, req.(*WorkflowQuery)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).Get(ctx, 
in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflow.WorkflowService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).Get(ctx, req.(*WorkflowQuery)) + } + return interceptor(ctx, in, info, handler) +} + +var _WorkflowService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "workflow.WorkflowService", + HandlerType: (*WorkflowServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Create", + Handler: _WorkflowService_Create_Handler, + }, + { + MethodName: "List", + Handler: _WorkflowService_List_Handler, + }, + { + MethodName: "Get", + Handler: _WorkflowService_Get_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cmd/server/workflow/workflow.proto", +} + +func (m *WorkflowCreateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowCreateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Response) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Response))) + i += copy(dAtA[i:], m.Response) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *WorkflowListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowListResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Workflows) > 0 { + for _, msg := range m.Workflows { + dAtA[i] = 0xa + i++ + i = encodeVarintWorkflow(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *WorkflowResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Workflows != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintWorkflow(dAtA, i, uint64(m.Workflows.Size())) + n1, err := m.Workflows.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *WorkflowQuery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowQuery) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Namespace) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + } + if m.StartIdx != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintWorkflow(dAtA, i, uint64(m.StartIdx)) + } + if m.PageSize != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintWorkflow(dAtA, i, uint64(m.PageSize)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, 
nil +} + +func encodeVarintWorkflow(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *WorkflowCreateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Response) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WorkflowListResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Workflows) > 0 { + for _, e := range m.Workflows { + l = e.Size() + n += 1 + l + sovWorkflow(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WorkflowResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Workflows != nil { + l = m.Workflows.Size() + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WorkflowQuery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.StartIdx != 0 { + n += 1 + sovWorkflow(uint64(m.StartIdx)) + } + if m.PageSize != 0 { + n += 1 + sovWorkflow(uint64(m.PageSize)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovWorkflow(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozWorkflow(x uint64) (n int) { + return sovWorkflow(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *WorkflowCreateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowCreateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowCreateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Response = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflows = append(m.Workflows, &v1alpha1.Workflow{}) + if err := m.Workflows[len(m.Workflows)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Workflows == nil { + m.Workflows = &v1alpha1.Workflow{} + } + if err := m.Workflows.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowQuery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartIdx", wireType) + } + m.StartIdx = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartIdx |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PageSize", wireType) + } + m.PageSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PageSize |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWorkflow(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWorkflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWorkflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWorkflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWorkflow + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthWorkflow + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWorkflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWorkflow(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthWorkflow + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthWorkflow = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWorkflow = fmt.Errorf("proto: integer overflow") +) diff --git a/cmd/server/workflow/workflow.pb.gw.go b/cmd/server/workflow/workflow.pb.gw.go new file mode 100644 index 000000000000..a72907155882 --- /dev/null +++ b/cmd/server/workflow/workflow.pb.gw.go @@ -0,0 +1,204 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: cmd/server/workflow/workflow.proto + +/* +Package workflow is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package workflow + +import ( + "context" + "io" + "net/http" + + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_WorkflowService_Create_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v1alpha1.Workflow + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Create(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_List_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_WorkflowService_List_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowQuery + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_List_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.List(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_Get_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_WorkflowService_Get_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowQuery + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Get_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterWorkflowServiceHandlerFromEndpoint is same as RegisterWorkflowServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterWorkflowServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterWorkflowServiceHandler(ctx, mux, conn) +} + +// RegisterWorkflowServiceHandler registers the http handlers for service WorkflowService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterWorkflowServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterWorkflowServiceHandlerClient(ctx, mux, NewWorkflowServiceClient(conn)) +} + +// RegisterWorkflowServiceHandlerClient registers the http handlers for service WorkflowService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "WorkflowServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WorkflowServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "WorkflowServiceClient" to call the correct interceptors. +func RegisterWorkflowServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client WorkflowServiceClient) error { + + mux.Handle("POST", pattern_WorkflowService_Create_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_Create_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Create_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_List_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_List_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_List_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+
+	})
+
+	mux.Handle("GET", pattern_WorkflowService_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_WorkflowService_Get_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_WorkflowService_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_WorkflowService_Create_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "workflow"}, "", runtime.AssumeColonVerbOpt(true)))
+
+	pattern_WorkflowService_List_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "workflows"}, "", runtime.AssumeColonVerbOpt(true)))
+
+	pattern_WorkflowService_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "workflow"}, "", runtime.AssumeColonVerbOpt(true)))
+)
+
+var (
+	forward_WorkflowService_Create_0 = runtime.ForwardResponseMessage
+
+	forward_WorkflowService_List_0 = runtime.ForwardResponseMessage
+
+	forward_WorkflowService_Get_0 = runtime.ForwardResponseMessage
+)
diff --git a/cmd/server/workflow/workflow.proto b/cmd/server/workflow/workflow.proto
new file mode 100644
index 000000000000..25051e6c647f
--- /dev/null
+++ b/cmd/server/workflow/workflow.proto
@@ -0,0 +1,47 @@
+syntax = "proto3";
+option go_package = "github.com/argoproj/argo/cmd/server/workflow";
+
+import "gogoproto/gogo.proto";
+import "google/api/annotations.proto";
+import "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1/generated.proto";
+
+// Workflow Service
+//
+// Workflow Service API performs CRUD actions against application resources
+package workflow;
+
+message WorkflowCreateResponse {
+    string response = 1;
+}
+
+message WorkflowListResponse {
+    repeated github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow workflows = 1;
+}
+
+message WorkflowResponse {
+    github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow workflows = 1;
+}
+
+message WorkflowQuery {
+    string Name = 1;
+    string Namespace = 2;
+    int32 StartIdx = 3;
+    int32 PageSize = 4;
+}
+
+service WorkflowService {
+    rpc Create(github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) returns (WorkflowCreateResponse) {
+        option (google.api.http) = {
+            post: "/api/v1/workflow"
+            body: "*"
+        };
+    }
+
+    rpc List(WorkflowQuery) returns (WorkflowListResponse) {
+        option (google.api.http).get = "/api/v1/workflows";
+    }
+
+    rpc Get(WorkflowQuery) returns (WorkflowResponse) {
+        option (google.api.http).get = "/api/v1/workflow";
+    }
+}
\ No newline at end of file
diff --git a/cmd/server/workflow/workflow.swagger.json b/cmd/server/workflow/workflow.swagger.json
new file mode 100644
index 000000000000..67483ffac0e5
--- /dev/null
+++ b/cmd/server/workflow/workflow.swagger.json
@@ -0,0 +1,3777 @@
+{
+  "swagger": "2.0",
+  "info": {
+    "title": "Workflow Service",
+    "description": "Workflow Service API performs CRUD actions against application resources",
+    "version": "version not set"
+  },
+  "schemes": [
+    "http",
+    "https"
+  ],
+  "consumes": [
+    "application/json"
+  ],
+
"produces": [ + "application/json" + ], + "paths": { + "/api/v1/workflow": { + "get": { + "operationId": "Get", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/workflowWorkflowResponse" + } + } + }, + "parameters": [ + { + "name": "Name", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "Namespace", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "StartIdx", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "PageSize", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "WorkflowService" + ] + }, + "post": { + "operationId": "Create", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/workflowWorkflowCreateResponse" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1Workflow" + } + } + ], + "tags": [ + "WorkflowService" + ] + } + }, + "/api/v1/workflows": { + "get": { + "operationId": "List", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/workflowWorkflowListResponse" + } + } + }, + "parameters": [ + { + "name": "Name", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "Namespace", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "StartIdx", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "PageSize", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "WorkflowService" + ] + } + } + }, + "definitions": { + "intstrIntOrString": { + "type": "object", + "properties": { + "type": { + "type": "string", + "format": "int64" + }, + "intVal": { + "type": "integer", + "format": "int32" + }, + "strVal": { + "type": "string" + } + }, + "description": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true", + "title": "IntOrString is a type that can hold an int32 or a string. When used in\nJSON or YAML marshalling and unmarshalling, it produces or consumes the\ninner type. This allows you to have, for example, a JSON field that can\naccept a name or number.\nTODO: Rename to Int32OrString" + }, + "resourceQuantity": { + "type": "object", + "properties": { + "string": { + "type": "string" + } + }, + "description": "Quantity is a fixed-point representation of a number.\nIt provides convenient marshaling/unmarshaling in JSON and YAML,\nin addition to String() and Int64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9\n\u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e\n\u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e\n\u003csign\u003e ::= \"+\" | \"-\"\n\u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e\n\u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e\n\u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent\na number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal\nplaces. Numbers larger or more precise will be capped or rounded up.\n(E.g.: 0.1m will rounded up to 1m.)\nThis may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix\nit had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\".\nThis means that Exponent/suffix will be adjusted up or down (with a\ncorresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a\nfloating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed,\nbut will be re-emitted in their canonical form. (So always use canonical\nform, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without\nwriting some sort of special handling code in the hopes that that will\ncause implementors to also use a fixed point implementation.\n\n+protobuf=true\n+protobuf.embed=string\n+protobuf.options.marshal=false\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:deepcopy-gen=true\n+k8s:openapi-gen=true" + }, + "v1AWSElasticBlockStoreVolumeSource": { + "type": "object", + "properties": { + "volumeID": { + "type": "string", + "title": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + }, + "fsType": { + "type": "string", + "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "partition": { + "type": "integer", + "format": "int32", + "title": "The partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\".\nIf omitted, the default is \"false\".\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional" + } + }, + "description": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk\nmust also be in the same AWS zone as the kubelet. An AWS EBS disk\ncan only be mounted as read/write once. AWS EBS volumes support\nownership management and SELinux relabeling." + }, + "v1Affinity": { + "type": "object", + "properties": { + "nodeAffinity": { + "$ref": "#/definitions/v1NodeAffinity", + "title": "Describes node affinity scheduling rules for the pod.\n+optional" + }, + "podAffinity": { + "$ref": "#/definitions/v1PodAffinity", + "title": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).\n+optional" + }, + "podAntiAffinity": { + "$ref": "#/definitions/v1PodAntiAffinity", + "title": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).\n+optional" + } + }, + "description": "Affinity is a group of affinity scheduling rules." + }, + "v1AzureDiskVolumeSource": { + "type": "object", + "properties": { + "diskName": { + "type": "string", + "title": "The Name of the data disk in the blob storage" + }, + "diskURI": { + "type": "string", + "title": "The URI the data disk in the blob storage" + }, + "cachingMode": { + "type": "string", + "title": "Host Caching mode: None, Read Only, Read Write.\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + }, + "kind": { + "type": "string", + "title": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared" + } + }, + "description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod." + }, + "v1AzureFileVolumeSource": { + "type": "object", + "properties": { + "secretName": { + "type": "string", + "title": "the name of secret that contains Azure Storage Account Name and Key" + }, + "shareName": { + "type": "string", + "title": "Share Name" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). 
ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + } + }, + "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod." + }, + "v1CSIVolumeSource": { + "type": "object", + "properties": { + "driver": { + "type": "string", + "description": "Driver is the name of the CSI driver that handles this volume.\nConsult with your admin for the correct name as registered in the cluster." + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Specifies a read-only configuration for the volume.\nDefaults to false (read/write).\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\".\nIf not provided, the empty value is passed to the associated CSI driver\nwhich will determine the default filesystem to apply.\n+optional" + }, + "volumeAttributes": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "VolumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values.\n+optional" + }, + "nodePublishSecretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "NodePublishSecretRef is a reference to the secret object containing\nsensitive information to pass to the CSI driver to complete the CSI\nNodePublishVolume and NodeUnpublishVolume calls.\nThis field is optional, and may be empty if no secret is required. If the\nsecret object contains more than one secret, all secret references are passed.\n+optional" + } + }, + "title": "Represents a source location of a volume to mount, managed by an external CSI driver" + }, + "v1Capabilities": { + "type": "object", + "properties": { + "add": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Added capabilities\n+optional" + }, + "drop": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Removed capabilities\n+optional" + } + }, + "description": "Adds and removes POSIX capabilities from running containers." + }, + "v1CephFSVolumeSource": { + "type": "object", + "properties": { + "monitors": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Required: Monitors is a collection of Ceph monitors\nMore info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it" + }, + "path": { + "type": "string", + "title": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /\n+optional" + }, + "user": { + "type": "string", + "title": "Optional: User is the rados user name, default is admin\nMore info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it\n+optional" + }, + "secretFile": { + "type": "string", + "title": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "Optional: SecretRef is reference to the authentication secret for User, default is empty.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Optional: Defaults to false (read/write). 
ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it\n+optional" + } + }, + "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod\nCephfs volumes do not support ownership management or SELinux relabeling." + }, + "v1CinderVolumeSource": { + "type": "object", + "properties": { + "volumeID": { + "type": "string", + "title": "volume id used to identify the volume in cinder\nMore info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "Optional: points to a secret object containing parameters used to connect\nto OpenStack.\n+optional" + } + }, + "description": "Represents a cinder volume resource in Openstack.\nA Cinder volume must exist before mounting to a container.\nThe volume must also be in the same region as the kubelet.\nCinder volumes support ownership management and SELinux relabeling." + }, + "v1ConfigMapEnvSource": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference", + "description": "The ConfigMap to select from." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the ConfigMap must be defined\n+optional" + } + }, + "description": "ConfigMapEnvSource selects a ConfigMap to populate the environment\nvariables with.\n\nThe contents of the target ConfigMap's Data field will represent the\nkey-value pairs as environment variables." + }, + "v1ConfigMapKeySelector": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference", + "description": "The ConfigMap to select from." + }, + "key": { + "type": "string", + "description": "The key to select." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the ConfigMap or it's key must be defined\n+optional" + } + }, + "description": "Selects a key from a ConfigMap." + }, + "v1ConfigMapProjection": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1KeyToPath" + }, + "title": "If unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' 
path or start with '..'.\n+optional" + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the ConfigMap or its keys must be defined\n+optional" + } + }, + "description": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a\nprojected volume as files using the keys in the Data field as the file names,\nunless the items element is populated with specific mappings of keys to paths.\nNote that this is identical to a configmap volume source without the default\nmode." + }, + "v1ConfigMapVolumeSource": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1KeyToPath" + }, + "title": "If unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + }, + "defaultMode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on created files by default. Must be a\nvalue between 0 and 0777. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the ConfigMap or its keys must be defined\n+optional" + } + }, + "description": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a\nvolume as files using the keys in the Data field as the file names, unless\nthe items element is populated with specific mappings of keys to paths.\nConfigMap volumes support ownership management and SELinux relabeling." + }, + "v1Container": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the container specified as a DNS_LABEL.\nEach container in a pod must have a unique name (DNS_LABEL).\nCannot be updated." + }, + "image": { + "type": "string", + "title": "Docker image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets.\n+optional" + }, + "command": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Entrypoint array. Not executed within a shell.\nThe docker image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\ncan be escaped with a double $$, ie: $$(VAR_NAME).
Escaped references will never be expanded,\nregardless of whether the variable exists or not.\nCannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" + }, + "args": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Arguments to the entrypoint.\nThe docker image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\ncan be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,\nregardless of whether the variable exists or not.\nCannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" + }, + "workingDir": { + "type": "string", + "title": "Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated.\n+optional" + }, + "ports": { + "type": "array", + "items": { + "$ref": "#/definitions/v1ContainerPort" + }, + "title": "List of ports to expose from the container. Exposing a port here gives\nthe system additional information about the network connections a\ncontainer uses, but is primarily informational. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nCannot be updated.\n+optional\n+patchMergeKey=containerPort\n+patchStrategy=merge\n+listType=map\n+listMapKey=containerPort\n+listMapKey=protocol" + }, + "envFrom": { + "type": "array", + "items": { + "$ref": "#/definitions/v1EnvFromSource" + }, + "title": "List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. 
When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated.\n+optional" + }, + "env": { + "type": "array", + "items": { + "$ref": "#/definitions/v1EnvVar" + }, + "title": "List of environment variables to set in the container.\nCannot be updated.\n+optional\n+patchMergeKey=name\n+patchStrategy=merge" + }, + "resources": { + "$ref": "#/definitions/v1ResourceRequirements", + "title": "Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/\n+optional" + }, + "volumeMounts": { + "type": "array", + "items": { + "$ref": "#/definitions/v1VolumeMount" + }, + "title": "Pod volumes to mount into the container's filesystem.\nCannot be updated.\n+optional\n+patchMergeKey=mountPath\n+patchStrategy=merge" + }, + "volumeDevices": { + "type": "array", + "items": { + "$ref": "#/definitions/v1VolumeDevice" + }, + "title": "volumeDevices is the list of block devices to be used by the container.\nThis is a beta feature.\n+patchMergeKey=devicePath\n+patchStrategy=merge\n+optional" + }, + "livenessProbe": { + "$ref": "#/definitions/v1Probe", + "title": "Periodic probe of container liveness.\nContainer will be restarted if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "readinessProbe": { + "$ref": "#/definitions/v1Probe", + "title": "Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "lifecycle": { + "$ref": "#/definitions/v1Lifecycle", + "title": "Actions that the management system should take in response to container lifecycle events.\nCannot be updated.\n+optional" + }, + "terminationMessagePath": { + "type": "string", + "title": "Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated.\n+optional" + }, + "terminationMessagePolicy": { + "type": "string", + "title": "Indicate how the termination message should be populated. 
File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated.\n+optional" + }, + "imagePullPolicy": { + "type": "string", + "title": "Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n+optional" + }, + "securityContext": { + "$ref": "#/definitions/v1SecurityContext", + "title": "Security options the pod should run with.\nMore info: https://kubernetes.io/docs/concepts/policy/security-context/\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/\n+optional" + }, + "stdin": { + "type": "boolean", + "format": "boolean", + "title": "Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false.\n+optional" + }, + "stdinOnce": { + "type": "boolean", + "format": "boolean", + "title": "Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. If this\nflag is false, a container process that reads from stdin will never receive an EOF.\nDefault is false\n+optional" + }, + "tty": { + "type": "boolean", + "format": "boolean", + "title": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false.\n+optional" + } + }, + "description": "A single application container that you want to run within a pod." + }, + "v1ContainerPort": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services.\n+optional" + }, + "hostPort": { + "type": "integer", + "format": "int32", + "title": "Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 \u003c x \u003c 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this.\n+optional" + }, + "containerPort": { + "type": "integer", + "format": "int32", + "description": "Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 \u003c x \u003c 65536." + }, + "protocol": { + "type": "string", + "title": "Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to \"TCP\".\n+optional" + }, + "hostIP": { + "type": "string", + "title": "What host IP to bind the external port to.\n+optional" + } + }, + "description": "ContainerPort represents a network port in a single container."
+ }, + "v1DownwardAPIProjection": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1DownwardAPIVolumeFile" + }, + "title": "Items is a list of DownwardAPIVolume file\n+optional" + } + }, + "description": "Represents downward API info for projecting into a projected volume.\nNote that this is identical to a downwardAPI volume source without the default\nmode." + }, + "v1DownwardAPIVolumeFile": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'" + }, + "fieldRef": { + "$ref": "#/definitions/v1ObjectFieldSelector", + "title": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.\n+optional" + }, + "resourceFieldRef": { + "$ref": "#/definitions/v1ResourceFieldSelector", + "title": "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.\n+optional" + }, + "mode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on this file, must be a value between 0\nand 0777. If not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + } + }, + "title": "DownwardAPIVolumeFile represents information to create the file containing the pod field" + }, + "v1DownwardAPIVolumeSource": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1DownwardAPIVolumeFile" + }, + "title": "Items is a list of downward API volume file\n+optional" + }, + "defaultMode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on created files by default. Must be a\nvalue between 0 and 0777. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + } + }, + "description": "DownwardAPIVolumeSource represents a volume containing downward API info.\nDownward API volumes support ownership management and SELinux relabeling." + }, + "v1EmptyDirVolumeSource": { + "type": "object", + "properties": { + "medium": { + "type": "string", + "title": "What type of storage medium should back this directory.\nThe default is \"\" which means to use the node's default medium.\nMust be an empty string (default) or Memory.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\n+optional" + }, + "sizeLimit": { + "$ref": "#/definitions/resourceQuantity", + "title": "Total amount of local storage required for this EmptyDir volume.\nThe size limit is also applicable for memory medium.\nThe maximum usage on memory medium EmptyDir would be the minimum value between\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\nThe default is nil which means that the limit is undefined.\nMore info: http://kubernetes.io/docs/user-guide/volumes#emptydir\n+optional" + } + }, + "description": "Represents an empty directory for a pod.\nEmpty directory volumes support ownership management and SELinux relabeling." 
+ }, + "v1EnvFromSource": { + "type": "object", + "properties": { + "prefix": { + "type": "string", + "title": "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.\n+optional" + }, + "configMapRef": { + "$ref": "#/definitions/v1ConfigMapEnvSource", + "title": "The ConfigMap to select from\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1SecretEnvSource", + "title": "The Secret to select from\n+optional" + } + }, + "title": "EnvFromSource represents the source of a set of ConfigMaps" + }, + "v1EnvVar": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the environment variable. Must be a C_IDENTIFIER." + }, + "value": { + "type": "string", + "title": "Variable references $(VAR_NAME) are expanded\nusing the previous defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. The $(VAR_NAME)\nsyntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped\nreferences will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\".\n+optional" + }, + "valueFrom": { + "$ref": "#/definitions/v1EnvVarSource", + "title": "Source for the environment variable's value. Cannot be used if value is not empty.\n+optional" + } + }, + "description": "EnvVar represents an environment variable present in a Container." + }, + "v1EnvVarSource": { + "type": "object", + "properties": { + "fieldRef": { + "$ref": "#/definitions/v1ObjectFieldSelector", + "title": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.\n+optional" + }, + "resourceFieldRef": { + "$ref": "#/definitions/v1ResourceFieldSelector", + "title": "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.\n+optional" + }, + "configMapKeyRef": { + "$ref": "#/definitions/v1ConfigMapKeySelector", + "title": "Selects a key of a ConfigMap.\n+optional" + }, + "secretKeyRef": { + "$ref": "#/definitions/v1SecretKeySelector", + "title": "Selects a key of a secret in the pod's namespace\n+optional" + } + }, + "description": "EnvVarSource represents a source for the value of an EnvVar." + }, + "v1ExecAction": { + "type": "object", + "properties": { + "command": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\n+optional" + } + }, + "description": "ExecAction describes a \"run in container\" action." 
+ }, + "v1FCVolumeSource": { + "type": "object", + "properties": { + "targetWWNs": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Optional: FC target worldwide names (WWNs)\n+optional" + }, + "lun": { + "type": "integer", + "format": "int32", + "title": "Optional: FC target lun number\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + }, + "wwids": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously.\n+optional" + } + }, + "description": "Represents a Fibre Channel volume.\nFibre Channel volumes can only be mounted as read/write once.\nFibre Channel volumes support ownership management and SELinux relabeling." + }, + "v1Fields": { + "type": "object", + "properties": { + "map": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/v1Fields" + }, + "description": "Map stores a set of fields in a data structure like a Trie.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set,\nor a string representing a sub-field or item. The string will follow one of these four formats:\n'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map\n'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item\n'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list\n'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values\nIf a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + } + }, + "title": "Fields stores a set of fields in a data structure like a Trie.\nTo understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff" + }, + "v1FlexVolumeSource": { + "type": "object", + "properties": { + "driver": { + "type": "string", + "description": "Driver is the name of the driver to use for this volume." + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "Optional: SecretRef is reference to the secret object containing\nsensitive information to pass to the plugin scripts. This may be\nempty if no secret object is specified. If the secret object\ncontains more than one secret, all secrets are passed to the plugin\nscripts.\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Optional: Defaults to false (read/write). 
ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + }, + "options": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Optional: Extra command options if any.\n+optional" + } + }, + "description": "FlexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin." + }, + "v1FlockerVolumeSource": { + "type": "object", + "properties": { + "datasetName": { + "type": "string", + "title": "Name of the dataset stored as metadata -\u003e name on the dataset for Flocker\nshould be considered as deprecated\n+optional" + }, + "datasetUUID": { + "type": "string", + "title": "UUID of the dataset. This is the unique identifier of a Flocker dataset\n+optional" + } + }, + "description": "Represents a Flocker volume mounted by the Flocker agent.\nOne and only one of datasetName and datasetUUID should be set.\nFlocker volumes do not support ownership management or SELinux relabeling." + }, + "v1GCEPersistentDiskVolumeSource": { + "type": "object", + "properties": { + "pdName": { + "type": "string", + "title": "Unique name of the PD resource in GCE. Used to identify the disk in GCE.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + }, + "fsType": { + "type": "string", + "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "partition": { + "type": "integer", + "format": "int32", + "title": "The partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" + } + }, + "description": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must\nalso be in the same GCE project and zone as the kubelet. A GCE PD\ncan only be mounted as read/write once or read-only many times. GCE\nPDs support ownership management and SELinux relabeling." + }, + "v1GitRepoVolumeSource": { + "type": "object", + "properties": { + "repository": { + "type": "string", + "title": "Repository URL" + }, + "revision": { + "type": "string", + "title": "Commit hash for the specified revision.\n+optional" + }, + "directory": { + "type": "string", + "title": "Target directory name.\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\ngit repository. Otherwise, if specified, the volume will contain the git repository in\nthe subdirectory with the given name.\n+optional" + } + }, + "description": "Represents a volume that is populated with the contents of a git repository.\nGit repo volumes do not support ownership management.\nGit repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated.
To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container." + }, + "v1GlusterfsVolumeSource": { + "type": "object", + "properties": { + "endpoints": { + "type": "string", + "title": "EndpointsName is the endpoint name that details Glusterfs topology.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod" + }, + "path": { + "type": "string", + "title": "Path is the Glusterfs volume path.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod\n+optional" + } + }, + "description": "Represents a Glusterfs mount that lasts the lifetime of a pod.\nGlusterfs volumes do not support ownership management or SELinux relabeling." + }, + "v1HTTPGetAction": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Path to access on the HTTP server.\n+optional" + }, + "port": { + "$ref": "#/definitions/intstrIntOrString", + "description": "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + }, + "host": { + "type": "string", + "title": "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead.\n+optional" + }, + "scheme": { + "type": "string", + "title": "Scheme to use for connecting to the host.\nDefaults to HTTP.\n+optional" + }, + "httpHeaders": { + "type": "array", + "items": { + "$ref": "#/definitions/v1HTTPHeader" + }, + "title": "Custom headers to set in the request. HTTP allows repeated headers.\n+optional" + } + }, + "description": "HTTPGetAction describes an action based on HTTP Get requests." + }, + "v1HTTPHeader": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "The header field name" + }, + "value": { + "type": "string", + "title": "The header field value" + } + }, + "title": "HTTPHeader describes a custom header to be used in HTTP probes" + }, + "v1Handler": { + "type": "object", + "properties": { + "exec": { + "$ref": "#/definitions/v1ExecAction", + "title": "One and only one of the following should be specified.\nExec specifies the action to take.\n+optional" + }, + "httpGet": { + "$ref": "#/definitions/v1HTTPGetAction", + "title": "HTTPGet specifies the http request to perform.\n+optional" + }, + "tcpSocket": { + "$ref": "#/definitions/v1TCPSocketAction", + "title": "TCPSocket specifies an action involving a TCP port.\nTCP hooks not yet supported\nTODO: implement a realistic TCP lifecycle hook\n+optional" + } + }, + "description": "Handler defines a specific action that should be taken\nTODO: pass structured data to these actions, and document that data here." + }, + "v1HostAlias": { + "type": "object", + "properties": { + "ip": { + "type": "string", + "description": "IP address of the host file entry." + }, + "hostnames": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Hostnames for the above IP address." + } + }, + "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the\npod's hosts file." 
+ }, + "v1HostPathVolumeSource": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Path of the directory on the host.\nIf the path is a symlink, it will follow the link to the real path.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + }, + "type": { + "type": "string", + "title": "Type for HostPath Volume\nDefaults to \"\"\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n+optional" + } + }, + "description": "Represents a host path mapped into a pod.\nHost path volumes do not support ownership management or SELinux relabeling." + }, + "v1ISCSIVolumeSource": { + "type": "object", + "properties": { + "targetPortal": { + "type": "string", + "description": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." + }, + "iqn": { + "type": "string", + "description": "Target iSCSI Qualified Name." + }, + "lun": { + "type": "integer", + "format": "int32", + "description": "iSCSI Target Lun number." + }, + "iscsiInterface": { + "type": "string", + "title": "iSCSI Interface Name that uses an iSCSI transport.\nDefaults to 'default' (tcp).\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\n+optional" + }, + "portals": { + "type": "array", + "items": { + "type": "string" + }, + "title": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260).\n+optional" + }, + "chapAuthDiscovery": { + "type": "boolean", + "format": "boolean", + "title": "whether support iSCSI Discovery CHAP authentication\n+optional" + }, + "chapAuthSession": { + "type": "boolean", + "format": "boolean", + "title": "whether support iSCSI Session CHAP authentication\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "CHAP Secret for iSCSI target and initiator authentication\n+optional" + }, + "initiatorName": { + "type": "string", + "title": "Custom iSCSI Initiator Name.\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\n\u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.\n+optional" + } + }, + "description": "Represents an ISCSI disk.\nISCSI volumes can only be mounted as read/write once.\nISCSI volumes support ownership management and SELinux relabeling." + }, + "v1Initializer": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "name of the process that is responsible for initializing this object." + } + }, + "description": "Initializer is information about an initializer that has not yet completed." 
+ }, + "v1Initializers": { + "type": "object", + "properties": { + "pending": { + "type": "array", + "items": { + "$ref": "#/definitions/v1Initializer" + }, + "title": "Pending is a list of initializers that must execute in order before this object is visible.\nWhen the last pending initializer is removed, and no failing result is set, the initializers\nstruct will be set to nil and the object is considered as initialized and visible to all\nclients.\n+patchMergeKey=name\n+patchStrategy=merge" + }, + "result": { + "$ref": "#/definitions/v1Status", + "description": "If result is set with the Failure field, the object will be persisted to storage and then deleted,\nensuring that other clients can observe the deletion." + } + }, + "description": "Initializers tracks the progress of initialization." + }, + "v1KeyToPath": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "The key to project." + }, + "path": { + "type": "string", + "description": "The relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." + }, + "mode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on this file, must be a value between 0\nand 0777. If not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + } + }, + "description": "Maps a string key to a path within a volume." + }, + "v1LabelSelector": { + "type": "object", + "properties": { + "matchLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed.\n+optional" + }, + "matchExpressions": { + "type": "array", + "items": { + "$ref": "#/definitions/v1LabelSelectorRequirement" + }, + "title": "matchExpressions is a list of label selector requirements. The requirements are ANDed.\n+optional" + } + }, + "description": "A label selector is a label query over a set of resources. The result of matchLabels and\nmatchExpressions are ANDed. An empty label selector matches all objects. A null\nlabel selector matches no objects." + }, + "v1LabelSelectorRequirement": { + "type": "object", + "properties": { + "key": { + "type": "string", + "title": "key is the label key that the selector applies to.\n+patchMergeKey=key\n+patchStrategy=merge" + }, + "operator": { + "type": "string", + "description": "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + }, + "values": { + "type": "array", + "items": { + "type": "string" + }, + "title": "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch.\n+optional" + } + }, + "description": "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." + }, + "v1Lifecycle": { + "type": "object", + "properties": { + "postStart": { + "$ref": "#/definitions/v1Handler", + "title": "PostStart is called immediately after a container is created. 
If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\n+optional" + }, + "preStop": { + "$ref": "#/definitions/v1Handler", + "title": "PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The reason for termination is passed to the\nhandler. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod. Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\n+optional" + } + }, + "description": "Lifecycle describes actions that the management system should take in response to container lifecycle\nevents. For the PostStart and PreStop lifecycle handlers, management of the container blocks\nuntil the action is complete, unless the container process fails, in which case the handler is aborted." + }, + "v1ListMeta": { + "type": "object", + "properties": { + "selfLink": { + "type": "string", + "title": "selfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n+optional" + }, + "resourceVersion": { + "type": "string", + "title": "String that identifies the server's internal version of this object that\ncan be used by clients to determine when objects have changed.\nValue must be treated as opaque by clients and passed unmodified back to the server.\nPopulated by the system.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency\n+optional" + }, + "continue": { + "type": "string", + "description": "continue may be set if the user set a limit on the number of items returned, and indicates that\nthe server has more data available. The value is opaque and may be used to issue another request\nto the endpoint that served this list to retrieve the next set of available objects. Continuing a\nconsistent list may not be possible if the server configuration has changed or more than a few\nminutes have passed. The resourceVersion field returned when using this continue value will be\nidentical to the value in the first response, unless you have received this token from an error\nmessage." + } + }, + "description": "ListMeta describes metadata that synthetic resources must have, including lists and\nvarious status objects. A resource may have only one of {ObjectMeta, ListMeta}." + }, + "v1LocalObjectReference": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?\n+optional" + } + }, + "description": "LocalObjectReference contains enough information to let you locate the\nreferenced object inside the same namespace." + }, + "v1ManagedFieldsEntry": { + "type": "object", + "properties": { + "manager": { + "type": "string", + "description": "Manager is an identifier of the workflow managing these fields."
+ }, + "operation": { + "type": "string", + "description": "Operation is the type of operation which lead to this ManagedFieldsEntry being created.\nThe only valid values for this field are 'Apply' and 'Update'." + }, + "apiVersion": { + "type": "string", + "description": "APIVersion defines the version of this resource that this field set\napplies to. The format is \"group/version\" just like the top-level\nAPIVersion field. It is necessary to track the version of a field\nset because it cannot be automatically converted." + }, + "time": { + "$ref": "#/definitions/v1Time", + "title": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'\n+optional" + }, + "fields": { + "$ref": "#/definitions/v1Fields", + "title": "Fields identifies a set of fields.\n+optional" + } + }, + "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource\nthat the fieldset applies to." + }, + "v1NFSVolumeSource": { + "type": "object", + "properties": { + "server": { + "type": "string", + "title": "Server is the hostname or IP address of the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + }, + "path": { + "type": "string", + "title": "Path that is exported by the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force\nthe NFS export to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional" + } + }, + "description": "Represents an NFS mount that lasts the lifetime of a pod.\nNFS volumes do not support ownership management or SELinux relabeling." + }, + "v1NodeAffinity": { + "type": "object", + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "$ref": "#/definitions/v1NodeSelector", + "title": "If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to an update), the system\nmay or may not try to eventually evict the pod from its node.\n+optional" + }, + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/v1PreferredSchedulingTerm" + }, + "title": "The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node matches the corresponding matchExpressions; the\nnode(s) with the highest sum are the most preferred.\n+optional" + } + }, + "description": "Node affinity is a group of node affinity scheduling rules." + }, + "v1NodeSelector": { + "type": "object", + "properties": { + "nodeSelectorTerms": { + "type": "array", + "items": { + "$ref": "#/definitions/v1NodeSelectorTerm" + }, + "description": "Required. A list of node selector terms. The terms are ORed." 
+ } + }, + "description": "A node selector represents the union of the results of one or more label queries\nover a set of nodes; that is, it represents the OR of the selectors represented\nby the node selector terms." + }, + "v1NodeSelectorRequirement": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "The label key that the selector applies to." + }, + "operator": { + "type": "string", + "description": "Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt." + }, + "values": { + "type": "array", + "items": { + "type": "string" + }, + "title": "An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch.\n+optional" + } + }, + "description": "A node selector requirement is a selector that contains values, a key, and an operator\nthat relates the key and values." + }, + "v1NodeSelectorTerm": { + "type": "object", + "properties": { + "matchExpressions": { + "type": "array", + "items": { + "$ref": "#/definitions/v1NodeSelectorRequirement" + }, + "title": "A list of node selector requirements by node's labels.\n+optional" + }, + "matchFields": { + "type": "array", + "items": { + "$ref": "#/definitions/v1NodeSelectorRequirement" + }, + "title": "A list of node selector requirements by node's fields.\n+optional" + } + }, + "description": "A null or empty node selector term matches no objects. Their requirements are ANDed.\nThe TopologySelectorTerm type implements a subset of the NodeSelectorTerm." + }, + "v1ObjectFieldSelector": { + "type": "object", + "properties": { + "apiVersion": { + "type": "string", + "title": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".\n+optional" + }, + "fieldPath": { + "type": "string", + "description": "Path of the field to select in the specified API version." + } + }, + "description": "ObjectFieldSelector selects an APIVersioned field of an object." + }, + "v1ObjectMeta": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n+optional" + }, + "generateName": { + "type": "string", + "description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed.
This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency\n+optional" + }, + "namespace": { + "type": "string", + "description": "Namespace defines the space within which each name must be unique. An empty namespace is\nequivalent to the \"default\" namespace, but \"default\" is the canonical representation.\nNot all objects are required to be scoped to a namespace - the value of this field for\nthose objects will be empty.\n\nMust be a DNS_LABEL.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/namespaces\n+optional" + }, + "selfLink": { + "type": "string", + "title": "SelfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n+optional" + }, + "uid": { + "type": "string", + "description": "UID is the unique in time and space value for this object. It is typically generated by\nthe server on successful creation of a resource and is not allowed to change on PUT\noperations.\n\nPopulated by the system.\nRead-only.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids\n+optional" + }, + "resourceVersion": { + "type": "string", + "description": "An opaque value that represents the internal version of this object that can\nbe used by clients to determine when objects have changed. May be used for optimistic\nconcurrency, change detection, and the watch operation on a resource or set of resources.\nClients must treat these values as opaque and pass them unmodified back to the server.\nThey may only be valid for a particular resource or set of resources.\n\nPopulated by the system.\nRead-only.\nValue must be treated as opaque by clients.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency\n+optional" + }, + "generation": { + "type": "string", + "format": "int64", + "title": "A sequence number representing a specific generation of the desired state.\nPopulated by the system. Read-only.\n+optional" + }, + "creationTimestamp": { + "$ref": "#/definitions/v1Time", + "description": "CreationTimestamp is a timestamp representing the server time when this object was\ncreated. It is not guaranteed to be set in happens-before order across separate operations.\nClients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system.\nRead-only.\nNull for lists.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata\n+optional" + }, + "deletionTimestamp": { + "$ref": "#/definitions/v1Time", + "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This\nfield is set by the server when a graceful deletion is requested by the user, and is not\ndirectly settable by a client. The resource is expected to be deleted (no longer visible\nfrom resource lists, and not reachable by name) after the time in this field, once the\nfinalizers list is empty.
As long as the finalizers list contains items, deletion is blocked.\nOnce the deletionTimestamp is set, this value may not be unset or be set further into the\nfuture, although it may be shortened or the resource may be deleted prior to this time.\nFor example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react\nby sending a graceful termination signal to the containers in the pod. After that 30 seconds,\nthe Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,\nremove the pod from the API. In the presence of network partitions, this object may still\nexist after this timestamp, until an administrator or automated process can determine the\nresource is fully terminated.\nIf not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata\n+optional" + }, + "deletionGracePeriodSeconds": { + "type": "string", + "format": "int64", + "title": "Number of seconds allowed for this object to gracefully terminate before\nit will be removed from the system. Only set when deletionTimestamp is also set.\nMay only be shortened.\nRead-only.\n+optional" + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. May match selectors of replication controllers\nand services.\nMore info: http://kubernetes.io/docs/user-guide/labels\n+optional" + }, + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: http://kubernetes.io/docs/user-guide/annotations\n+optional" + }, + "ownerReferences": { + "type": "array", + "items": { + "$ref": "#/definitions/v1OwnerReference" + }, + "title": "List of objects depended on by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n+optional\n+patchMergeKey=uid\n+patchStrategy=merge" + }, + "initializers": { + "$ref": "#/definitions/v1Initializers", + "description": "An initializer is a controller which enforces some system invariant at object creation time.\nThis field is a list of initializers that have not yet acted on this object. If nil or empty,\nthis object has been completely initialized. Otherwise, the object is considered uninitialized\nand is hidden (in list/watch and get calls) from clients that haven't explicitly asked to\nobserve uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers.\nOnly privileged users may set or modify this list. Once it is empty, it may not be modified further\nby any user.\n\nDEPRECATED - initializers are an alpha field and will be removed in v1.15." + }, + "finalizers": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Must be empty before the object is deleted from the registry.
Each entry\nis an identifier for the responsible component that will remove the entry\nfrom the list. If the deletionTimestamp of the object is non-nil, entries\nin this list can only be removed.\n+optional\n+patchStrategy=merge" + }, + "clusterName": { + "type": "string", + "title": "The name of the cluster which the object belongs to.\nThis is used to distinguish resources with the same name and namespace in different clusters.\nThis field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.\n+optional" + }, + "managedFields": { + "type": "array", + "items": { + "$ref": "#/definitions/v1ManagedFieldsEntry" + }, + "description": "ManagedFields maps workflow-id and version to the set of fields\nthat are managed by that workflow. This is mostly for internal\nhousekeeping, and users typically shouldn't need to set or\nunderstand this field. A workflow can be the user's name, a\ncontroller's name, or the name of a specific apply path like\n\"ci-cd\". The set of fields is always in the version that the\nworkflow used when modifying the object.\n\nThis field is alpha and can be changed or removed without notice.\n\n+optional" + } + }, + "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects\nusers must create." + }, + "v1OwnerReference": { + "type": "object", + "properties": { + "apiVersion": { + "type": "string", + "description": "API version of the referent." + }, + "kind": { + "type": "string", + "title": "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + }, + "name": { + "type": "string", + "title": "Name of the referent.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names" + }, + "uid": { + "type": "string", + "title": "UID of the referent.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids" + }, + "controller": { + "type": "boolean", + "format": "boolean", + "title": "If true, this reference points to the managing controller.\n+optional" + }, + "blockOwnerDeletion": { + "type": "boolean", + "format": "boolean", + "title": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then\nthe owner cannot be deleted from the key-value store until this\nreference is removed.\nDefaults to false.\nTo set this field, a user needs \"delete\" permission of the owner,\notherwise 422 (Unprocessable Entity) will be returned.\n+optional" + } + }, + "description": "OwnerReference contains enough information to let you identify an owning\nobject. An owning object must be in the same namespace as the dependent, or\nbe cluster-scoped, so there is no namespace field."
+ }, + "v1PersistentVolumeClaim": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/definitions/v1ObjectMeta", + "title": "Standard object's metadata.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata\n+optional" + }, + "spec": { + "$ref": "#/definitions/v1PersistentVolumeClaimSpec", + "title": "Spec defines the desired characteristics of a volume requested by a pod author.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional" + }, + "status": { + "$ref": "#/definitions/v1PersistentVolumeClaimStatus", + "title": "Status represents the current information/status of a persistent volume claim.\nRead-only.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional" + } + }, + "title": "PersistentVolumeClaim is a user's request for and claim to a persistent volume" + }, + "v1PersistentVolumeClaimCondition": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "status": { + "type": "string" + }, + "lastProbeTime": { + "$ref": "#/definitions/v1Time", + "title": "Last time we probed the condition.\n+optional" + }, + "lastTransitionTime": { + "$ref": "#/definitions/v1Time", + "title": "Last time the condition transitioned from one status to another.\n+optional" + }, + "reason": { + "type": "string", + "title": "Unique, this should be a short, machine understandable string that gives the reason\nfor condition's last transition. If it reports \"ResizeStarted\" that means the underlying\npersistent volume is being resized.\n+optional" + }, + "message": { + "type": "string", + "title": "Human-readable message indicating details about last transition.\n+optional" + } + }, + "title": "PersistentVolumeClaimCondition contails details about state of pvc" + }, + "v1PersistentVolumeClaimSpec": { + "type": "object", + "properties": { + "accessModes": { + "type": "array", + "items": { + "type": "string" + }, + "title": "AccessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional" + }, + "selector": { + "$ref": "#/definitions/v1LabelSelector", + "title": "A label query over volumes to consider for binding.\n+optional" + }, + "resources": { + "$ref": "#/definitions/v1ResourceRequirements", + "title": "Resources represents the minimum resources the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources\n+optional" + }, + "volumeName": { + "type": "string", + "title": "VolumeName is the binding reference to the PersistentVolume backing this claim.\n+optional" + }, + "storageClassName": { + "type": "string", + "title": "Name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1\n+optional" + }, + "volumeMode": { + "type": "string", + "title": "volumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.\nThis is a beta feature.\n+optional" + }, + "dataSource": { + "$ref": "#/definitions/v1TypedLocalObjectReference", + "title": "This field requires the VolumeSnapshotDataSource alpha feature gate to be\nenabled and currently VolumeSnapshot is the only supported data source.\nIf the provisioner can support VolumeSnapshot data source, it will create\na new volume and data will be restored to the volume at the same time.\nIf the provisioner 
does not support VolumeSnapshot data source, volume will\nnot be created and the failure will be reported as an event.\nIn the future, we plan to support more data source types and the behavior\nof the provisioner may change.\n+optional" + } + }, + "title": "PersistentVolumeClaimSpec describes the common attributes of storage devices\nand allows a Source for provider-specific attributes" + }, + "v1PersistentVolumeClaimStatus": { + "type": "object", + "properties": { + "phase": { + "type": "string", + "title": "Phase represents the current phase of PersistentVolumeClaim.\n+optional" + }, + "accessModes": { + "type": "array", + "items": { + "type": "string" + }, + "title": "AccessModes contains the actual access modes the volume backing the PVC has.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional" + }, + "capacity": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/resourceQuantity" + }, + "title": "Represents the actual resources of the underlying volume.\n+optional" + }, + "conditions": { + "type": "array", + "items": { + "$ref": "#/definitions/v1PersistentVolumeClaimCondition" + }, + "title": "Current Condition of persistent volume claim. If underlying persistent volume is being\nresized then the Condition will be set to 'ResizeStarted'.\n+optional\n+patchMergeKey=type\n+patchStrategy=merge" + } + }, + "description": "PersistentVolumeClaimStatus is the current status of a persistent volume claim." + }, + "v1PersistentVolumeClaimVolumeSource": { + "type": "object", + "properties": { + "claimName": { + "type": "string", + "title": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Will force the ReadOnly setting in VolumeMounts.\nDefault false.\n+optional" + } + }, + "description": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.\nThis volume finds the bound PV and mounts that volume for the pod. A\nPersistentVolumeClaimVolumeSource is, essentially, a wrapper around another\ntype of volume that is owned by someone else (the system)." + }, + "v1PhotonPersistentDiskVolumeSource": { + "type": "object", + "properties": { + "pdID": { + "type": "string", + "title": "ID that identifies Photon Controller persistent disk" + }, + "fsType": { + "type": "string", + "description": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + } + }, + "description": "Represents a Photon Controller persistent disk resource." + }, + "v1PodAffinity": { + "type": "object", + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/v1PodAffinityTerm" + }, + "title": "If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. 
all terms must be satisfied.\n+optional" + }, + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/v1WeightedPodAffinityTerm" + }, + "title": "The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred.\n+optional" + } + }, + "description": "Pod affinity is a group of inter pod affinity scheduling rules." + }, + "v1PodAffinityTerm": { + "type": "object", + "properties": { + "labelSelector": { + "$ref": "#/definitions/v1LabelSelector", + "title": "A label query over a set of resources, in this case pods.\n+optional" + }, + "namespaces": { + "type": "array", + "items": { + "type": "string" + }, + "title": "namespaces specifies which namespaces the labelSelector applies to (matches against);\nnull or empty list means \"this pod's namespace\"\n+optional" + }, + "topologyKey": { + "type": "string", + "description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." + } + }, + "title": "Defines a set of pods (namely those matching the labelSelector\nrelative to the given namespace(s)) that this pod should be\nco-located (affinity) or not co-located (anti-affinity) with,\nwhere co-located is defined as running on a node whose value of\nthe label with key \u003ctopologyKey\u003e matches that of any node on which\na pod of the set of pods is running" + }, + "v1PodAntiAffinity": { + "type": "object", + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/v1PodAffinityTerm" + }, + "title": "If the anti-affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the anti-affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied.\n+optional" + }, + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/v1WeightedPodAffinityTerm" + }, + "title": "The scheduler will prefer to schedule pods to nodes that satisfy\nthe anti-affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. 
The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred.\n+optional" + } + }, + "description": "Pod anti affinity is a group of inter pod anti affinity scheduling rules." + }, + "v1PodDNSConfig": { + "type": "object", + "properties": { + "nameservers": { + "type": "array", + "items": { + "type": "string" + }, + "title": "A list of DNS name server IP addresses.\nThis will be appended to the base nameservers generated from DNSPolicy.\nDuplicated nameservers will be removed.\n+optional" + }, + "searches": { + "type": "array", + "items": { + "type": "string" + }, + "title": "A list of DNS search domains for host-name lookup.\nThis will be appended to the base search paths generated from DNSPolicy.\nDuplicated search paths will be removed.\n+optional" + }, + "options": { + "type": "array", + "items": { + "$ref": "#/definitions/v1PodDNSConfigOption" + }, + "title": "A list of DNS resolver options.\nThis will be merged with the base options generated from DNSPolicy.\nDuplicated entries will be removed. Resolution options given in Options\nwill override those that appear in the base DNSPolicy.\n+optional" + } + }, + "description": "PodDNSConfig defines the DNS parameters of a pod in addition to\nthose generated from DNSPolicy." + }, + "v1PodDNSConfigOption": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Required." + }, + "value": { + "type": "string", + "title": "+optional" + } + }, + "description": "PodDNSConfigOption defines DNS resolver options of a pod." + }, + "v1PodSecurityContext": { + "type": "object", + "properties": { + "seLinuxOptions": { + "$ref": "#/definitions/v1SELinuxOptions", + "title": "The SELinux context to be applied to all containers.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in SecurityContext. If set in\nboth SecurityContext and PodSecurityContext, the value specified in SecurityContext\ntakes precedence for that container.\n+optional" + }, + "runAsUser": { + "type": "string", + "format": "int64", + "title": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\n+optional" + }, + "runAsGroup": { + "type": "string", + "format": "int64", + "title": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\n+optional" + }, + "runAsNonRoot": { + "type": "boolean", + "format": "boolean", + "title": "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in SecurityContext. 
If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "supplementalGroups": { + "type": "array", + "items": { + "type": "string", + "format": "int64" + }, + "title": "A list of groups applied to the first process run in each container, in addition\nto the container's primary GID. If unspecified, no groups will be added to\nany container.\n+optional" + }, + "fsGroup": { + "type": "string", + "format": "int64", + "description": "1. The owning GID will be the FSGroup\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\n3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\n+optional", + "title": "A special supplemental group that applies to all containers in a pod.\nSome volume types allow the Kubelet to change the ownership of that volume\nto be owned by the pod:" + }, + "sysctls": { + "type": "array", + "items": { + "$ref": "#/definitions/v1Sysctl" + }, + "title": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\n+optional" + } + }, + "description": "PodSecurityContext holds pod-level security attributes and common container settings.\nSome fields are also present in container.securityContext. Field values of\ncontainer.securityContext take precedence over field values of PodSecurityContext." + }, + "v1PortworxVolumeSource": { + "type": "object", + "properties": { + "volumeID": { + "type": "string", + "title": "VolumeID uniquely identifies a Portworx volume" + }, + "fsType": { + "type": "string", + "description": "FSType represents the filesystem type to mount\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified." + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + } + }, + "description": "PortworxVolumeSource represents a Portworx volume resource." + }, + "v1PreferredSchedulingTerm": { + "type": "object", + "properties": { + "weight": { + "type": "integer", + "format": "int32", + "description": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100." + }, + "preference": { + "$ref": "#/definitions/v1NodeSelectorTerm", + "description": "A node selector term, associated with the corresponding weight." + } + }, + "description": "An empty preferred scheduling term matches all objects with implicit weight 0\n(i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op)." + }, + "v1Probe": { + "type": "object", + "properties": { + "handler": { + "$ref": "#/definitions/v1Handler", + "title": "The action taken to determine the health of a container" + }, + "initialDelaySeconds": { + "type": "integer", + "format": "int32", + "title": "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "timeoutSeconds": { + "type": "integer", + "format": "int32", + "title": "Number of seconds after which the probe times out.\nDefaults to 1 second. 
Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "periodSeconds": { + "type": "integer", + "format": "int32", + "title": "How often (in seconds) to perform the probe.\nDefaults to 10 seconds. Minimum value is 1.\n+optional" + }, + "successThreshold": { + "type": "integer", + "format": "int32", + "title": "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness. Minimum value is 1.\n+optional" + }, + "failureThreshold": { + "type": "integer", + "format": "int32", + "title": "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.\n+optional" + } + }, + "description": "Probe describes a health check to be performed against a container to determine whether it is\nalive or ready to receive traffic." + }, + "v1ProjectedVolumeSource": { + "type": "object", + "properties": { + "sources": { + "type": "array", + "items": { + "$ref": "#/definitions/v1VolumeProjection" + }, + "title": "list of volume projections" + }, + "defaultMode": { + "type": "integer", + "format": "int32", + "title": "Mode bits to use on created files by default. Must be a value between\n0 and 0777.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + } + }, + "title": "Represents a projected volume source" + }, + "v1QuobyteVolumeSource": { + "type": "object", + "properties": { + "registry": { + "type": "string", + "title": "Registry represents a single or multiple Quobyte Registry services\nspecified as a string as host:port pair (multiple entries are separated with commas)\nwhich acts as the central registry for volumes" + }, + "volume": { + "type": "string", + "description": "Volume is a string that references an already created Quobyte volume by name." + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.\nDefaults to false.\n+optional" + }, + "user": { + "type": "string", + "title": "User to map volume access to\nDefaults to serviceaccount user\n+optional" + }, + "group": { + "type": "string", + "title": "Group to map volume access to\nDefault is no group\n+optional" + }, + "tenant": { + "type": "string", + "title": "Tenant owning the given Quobyte volume in the Backend\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin\n+optional" + } + }, + "description": "Represents a Quobyte mount that lasts the lifetime of a pod.\nQuobyte volumes do not support ownership management or SELinux relabeling." + }, + "v1RBDVolumeSource": { + "type": "object", + "properties": { + "monitors": { + "type": "array", + "items": { + "type": "string" + }, + "title": "A collection of Ceph monitors.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it" + }, + "image": { + "type": "string", + "title": "The rados image name.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it" + }, + "fsType": { + "type": "string", + "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "pool": { + "type": "string", + "title": "The rados pool name.\nDefault is rbd.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it\n+optional" + }, + "user": { + "type": "string", + "title": "The rados user name.\nDefault is admin.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it\n+optional" + }, + "keyring": { + "type": "string", + "title": "Keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "SecretRef is name of the authentication secret for RBDUser. If provided\noverrides keyring.\nDefault is nil.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it\n+optional" + } + }, + "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod.\nRBD volumes support ownership management and SELinux relabeling." + }, + "v1ResourceFieldSelector": { + "type": "object", + "properties": { + "containerName": { + "type": "string", + "title": "Container name: required for volumes, optional for env vars\n+optional" + }, + "resource": { + "type": "string", + "title": "Required: resource to select" + }, + "divisor": { + "$ref": "#/definitions/resourceQuantity", + "title": "Specifies the output format of the exposed resources, defaults to \"1\"\n+optional" + } + }, + "title": "ResourceFieldSelector represents container resources (cpu, memory) and their output format" + }, + "v1ResourceRequirements": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/resourceQuantity" + }, + "title": "Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/\n+optional" + }, + "requests": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/resourceQuantity" + }, + "title": "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/\n+optional" + } + }, + "description": "ResourceRequirements describes the compute resource requirements." 
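For orientation, here is a minimal Go sketch of the object the v1PersistentVolumeClaim* and v1ResourceRequirements definitions above describe, assuming the k8s.io/api/core/v1 and k8s.io/apimachinery types this spec is generated from; the claim name "workdir" and size "1Gi" are illustrative, not part of the patch:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        // A claim for 1Gi of ReadWriteOnce storage: PersistentVolumeClaimSpec
        // carries a ResourceRequirements, exactly as the two schemas say.
        pvc := corev1.PersistentVolumeClaim{
            ObjectMeta: metav1.ObjectMeta{Name: "workdir"},
            Spec: corev1.PersistentVolumeClaimSpec{
                AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
                Resources: corev1.ResourceRequirements{
                    Requests: corev1.ResourceList{
                        corev1.ResourceStorage: resource.MustParse("1Gi"),
                    },
                },
            },
        }
        fmt.Println(pvc.Name, pvc.Spec.Resources.Requests.Storage().String())
    }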
+ }, + "v1SELinuxOptions": { + "type": "object", + "properties": { + "user": { + "type": "string", + "title": "User is a SELinux user label that applies to the container.\n+optional" + }, + "role": { + "type": "string", + "title": "Role is a SELinux role label that applies to the container.\n+optional" + }, + "type": { + "type": "string", + "title": "Type is a SELinux type label that applies to the container.\n+optional" + }, + "level": { + "type": "string", + "title": "Level is a SELinux level label that applies to the container.\n+optional" + } + }, + "title": "SELinuxOptions are the labels to be applied to the container" + }, + "v1ScaleIOVolumeSource": { + "type": "object", + "properties": { + "gateway": { + "type": "string", + "description": "The host address of the ScaleIO API Gateway." + }, + "system": { + "type": "string", + "description": "The name of the storage system as configured in ScaleIO." + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "description": "SecretRef references the secret for ScaleIO user and other\nsensitive information. If this is not provided, Login operation will fail." + }, + "sslEnabled": { + "type": "boolean", + "format": "boolean", + "title": "Flag to enable/disable SSL communication with Gateway, default false\n+optional" + }, + "protectionDomain": { + "type": "string", + "title": "The name of the ScaleIO Protection Domain for the configured storage.\n+optional" + }, + "storagePool": { + "type": "string", + "title": "The ScaleIO Storage Pool associated with the protection domain.\n+optional" + }, + "storageMode": { + "type": "string", + "title": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\nDefault is ThinProvisioned.\n+optional" + }, + "volumeName": { + "type": "string", + "description": "The name of a volume already created in the ScaleIO system\nthat is associated with this volume source." + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\".\nDefault is \"xfs\".\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + } + }, + "title": "ScaleIOVolumeSource represents a persistent ScaleIO volume" + }, + "v1SecretEnvSource": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference", + "description": "The Secret to select from." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the Secret must be defined\n+optional" + } + }, + "description": "SecretEnvSource selects a Secret to populate the environment\nvariables with.\n\nThe contents of the target Secret's Data field will represent the\nkey-value pairs as environment variables." + }, + "v1SecretKeySelector": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference", + "description": "The name of the secret in the pod's namespace to select from." + }, + "key": { + "type": "string", + "description": "The key of the secret to select from. Must be a valid secret key." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the Secret or its key must be defined\n+optional" + } + }, + "description": "SecretKeySelector selects a key of a Secret."
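v1SecretKeySelector is worth calling out because the v1alpha1 artifact types later in this spec (usernameSecret, passwordSecret, krbCCacheSecret, and so on) all reference credentials through it. A sketch, again assuming the k8s.io/api/core/v1 types; the secret name and key are made up:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        // Selects one key out of a named secret in the pod's namespace.
        sel := corev1.SecretKeySelector{
            LocalObjectReference: corev1.LocalObjectReference{Name: "artifactory-creds"},
            Key:                  "password",
        }
        fmt.Printf("secret %q, key %q\n", sel.Name, sel.Key)
    }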
+ }, + "v1SecretProjection": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1KeyToPath" + }, + "title": "If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the Secret or its key must be defined\n+optional" + } + }, + "description": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a\nprojected volume as files using the keys in the Data field as the file names.\nNote that this is identical to a secret volume source without the default\nmode." + }, + "v1SecretVolumeSource": { + "type": "object", + "properties": { + "secretName": { + "type": "string", + "title": "Name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1KeyToPath" + }, + "title": "If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + }, + "defaultMode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on created files by default. Must be a\nvalue between 0 and 0777. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the Secret or its keys must be defined\n+optional" + } + }, + "description": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume\nas files using the keys in the Data field as the file names.\nSecret volumes support ownership management and SELinux relabeling."
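A sketch of v1SecretVolumeSource with an explicit items mapping, under the same k8s.io/api/core/v1 assumption; the secret name, key, and path are illustrative:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        // Project only tls.crt out of the secret, at an explicit path,
        // with restrictive default file permissions (0400).
        mode := int32(0400)
        src := corev1.SecretVolumeSource{
            SecretName:  "tls-certs",
            Items:       []corev1.KeyToPath{{Key: "tls.crt", Path: "certs/tls.crt"}},
            DefaultMode: &mode,
        }
        fmt.Println(src.SecretName, len(src.Items))
    }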
+ }, + "v1SecurityContext": { + "type": "object", + "properties": { + "capabilities": { + "$ref": "#/definitions/v1Capabilities", + "title": "The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\n+optional" + }, + "privileged": { + "type": "boolean", + "format": "boolean", + "title": "Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\n+optional" + }, + "seLinuxOptions": { + "$ref": "#/definitions/v1SELinuxOptions", + "title": "The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "runAsUser": { + "type": "string", + "format": "int64", + "title": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "runAsGroup": { + "type": "string", + "format": "int64", + "title": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "runAsNonRoot": { + "type": "boolean", + "format": "boolean", + "title": "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "readOnlyRootFilesystem": { + "type": "boolean", + "format": "boolean", + "title": "Whether this container has a read-only root filesystem.\nDefault is false.\n+optional" + }, + "allowPrivilegeEscalation": { + "type": "boolean", + "format": "boolean", + "title": "AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\n+optional" + }, + "procMount": { + "type": "string", + "title": "procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\n+optional" + } + }, + "description": "SecurityContext holds security configuration that will be applied to a container.\nSome fields are present in both SecurityContext and PodSecurityContext. When both\nare set, the values in SecurityContext take precedence." + }, + "v1ServiceAccountTokenProjection": { + "type": "object", + "properties": { + "audience": { + "type": "string", + "title": "Audience is the intended audience of the token. 
A recipient of a token\nmust identify itself with an identifier specified in the audience of the\ntoken, and otherwise should reject the token. The audience defaults to the\nidentifier of the apiserver.\n+optional" + }, + "expirationSeconds": { + "type": "string", + "format": "int64", + "title": "ExpirationSeconds is the requested duration of validity of the service\naccount token. As the token approaches expiration, the kubelet volume\nplugin will proactively rotate the service account token. The kubelet will\nstart trying to rotate the token if the token is older than 80 percent of\nits time to live or if the token is older than 24 hours. Defaults to 1 hour\nand must be at least 10 minutes.\n+optional" + }, + "path": { + "type": "string", + "description": "Path is the path relative to the mount point of the file to project the\ntoken into." + } + }, + "description": "ServiceAccountTokenProjection represents a projected service account token\nvolume. This projection can be used to insert a service account token into\nthe pod's runtime filesystem for use against APIs (Kubernetes API Server or\notherwise)." + }, + "v1Status": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/definitions/v1ListMeta", + "title": "Standard list metadata.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\n+optional" + }, + "status": { + "type": "string", + "title": "Status of the operation.\nOne of: \"Success\" or \"Failure\".\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status\n+optional" + }, + "message": { + "type": "string", + "title": "A human-readable description of the status of this operation.\n+optional" + }, + "reason": { + "type": "string", + "title": "A machine-readable description of why this operation is in the\n\"Failure\" status. If this value is empty there\nis no information available. A Reason clarifies an HTTP status\ncode but does not override it.\n+optional" + }, + "details": { + "$ref": "#/definitions/v1StatusDetails", + "title": "Extended data associated with the reason. Each reason may define its\nown extended details. This field is optional and the data returned\nis not guaranteed to conform to any schema except that defined by\nthe reason type.\n+optional" + }, + "code": { + "type": "integer", + "format": "int32", + "title": "Suggested HTTP return code for this status, 0 if not set.\n+optional" + } + }, + "description": "Status is a return value for calls that don't return other objects." + }, + "v1StatusCause": { + "type": "object", + "properties": { + "reason": { + "type": "string", + "title": "A machine-readable description of the cause of the error. If this value is\nempty there is no information available.\n+optional" + }, + "message": { + "type": "string", + "title": "A human-readable description of the cause of the error. This field may be\npresented as-is to a reader.\n+optional" + }, + "field": { + "type": "string", + "description": "The field of the resource that has caused this error, as named by its JSON\nserialization. May include dot and postfix notation for nested attributes.\nArrays are zero-indexed. 
Fields may appear more than once in an array of\ncauses due to fields having multiple errors.\nOptional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"\n+optional" + } + }, + "description": "StatusCause provides more information about an api.Status failure, including\ncases when multiple errors are encountered." + }, + "v1StatusDetails": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "The name attribute of the resource associated with the status StatusReason\n(when there is a single name which can be described).\n+optional" + }, + "group": { + "type": "string", + "title": "The group attribute of the resource associated with the status StatusReason.\n+optional" + }, + "kind": { + "type": "string", + "title": "The kind attribute of the resource associated with the status StatusReason.\nOn some operations may differ from the requested resource Kind.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\n+optional" + }, + "uid": { + "type": "string", + "title": "UID of the resource.\n(when there is a single resource which can be described).\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids\n+optional" + }, + "causes": { + "type": "array", + "items": { + "$ref": "#/definitions/v1StatusCause" + }, + "title": "The Causes array includes more details associated with the StatusReason\nfailure. Not all StatusReasons may provide detailed causes.\n+optional" + }, + "retryAfterSeconds": { + "type": "integer", + "format": "int32", + "title": "If specified, the time in seconds before the operation should be retried. Some errors may indicate\nthe client must take an alternate action - for those errors this field may indicate how long to wait\nbefore taking the alternate action.\n+optional" + } + }, + "description": "StatusDetails is a set of additional properties that MAY be set by the\nserver to provide additional information about a response. The Reason\nfield of a Status object defines what attributes will be set. Clients\nmust ignore fields that do not match the defined type of each attribute,\nand should assume that any attribute may be empty, invalid, or under\ndefined." + }, + "v1StorageOSVolumeSource": { + "type": "object", + "properties": { + "volumeName": { + "type": "string", + "description": "VolumeName is the human-readable name of the StorageOS volume. Volume\nnames are only unique within a namespace." + }, + "volumeNamespace": { + "type": "string", + "title": "VolumeNamespace specifies the scope of the volume within StorageOS. If no\nnamespace is specified then the Pod's namespace will be used. This allows the\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\nSet VolumeName to any name to override the default behaviour.\nSet to \"default\" if you are not using namespaces within StorageOS.\nNamespaces that do not pre-exist within StorageOS will be created.\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). 
ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "SecretRef specifies the secret to use for obtaining the StorageOS API\ncredentials. If not specified, default values will be attempted.\n+optional" + } + }, + "description": "Represents a StorageOS persistent volume resource." + }, + "v1Sysctl": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of a property to set" + }, + "value": { + "type": "string", + "title": "Value of a property to set" + } + }, + "title": "Sysctl defines a kernel parameter to be set" + }, + "v1TCPSocketAction": { + "type": "object", + "properties": { + "port": { + "$ref": "#/definitions/intstrIntOrString", + "description": "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + }, + "host": { + "type": "string", + "title": "Optional: Host name to connect to, defaults to the pod IP.\n+optional" + } + }, + "title": "TCPSocketAction describes an action based on opening a socket" + }, + "v1Time": { + "type": "object", + "properties": { + "seconds": { + "type": "string", + "format": "int64", + "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive." + }, + "nanos": { + "type": "integer", + "format": "int32", + "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context." + } + }, + "description": "Time is a wrapper around time.Time which supports correct\nmarshaling to YAML and JSON. Wrappers are provided for many\nof the factory methods that the time package offers.\n\n+protobuf.options.marshal=false\n+protobuf.as=Timestamp\n+protobuf.options.(gogoproto.goproto_stringer)=false" + }, + "v1Toleration": { + "type": "object", + "properties": { + "key": { + "type": "string", + "title": "Key is the taint key that the toleration applies to. Empty means match all taint keys.\nIf the key is empty, operator must be Exists; this combination means to match all values and all keys.\n+optional" + }, + "operator": { + "type": "string", + "title": "Operator represents a key's relationship to the value.\nValid operators are Exists and Equal. Defaults to Equal.\nExists is equivalent to wildcard for value, so that a pod can\ntolerate all taints of a particular category.\n+optional" + }, + "value": { + "type": "string", + "title": "Value is the taint value the toleration matches to.\nIf the operator is Exists, the value should be empty, otherwise just a regular string.\n+optional" + }, + "effect": { + "type": "string", + "title": "Effect indicates the taint effect to match. Empty means match all taint effects.\nWhen specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.\n+optional" + }, + "tolerationSeconds": { + "type": "string", + "format": "int64", + "title": "TolerationSeconds represents the period of time the toleration (which must be\nof effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,\nit is not set, which means tolerate the taint forever (do not evict). 
Zero and\nnegative values will be treated as 0 (evict immediately) by the system.\n+optional" + } + }, + "description": "The pod this Toleration is attached to tolerates any taint that matches\nthe triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e." + }, + "v1TypedLocalObjectReference": { + "type": "object", + "properties": { + "apiGroup": { + "type": "string", + "title": "APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required.\n+optional" + }, + "kind": { + "type": "string", + "title": "Kind is the type of resource being referenced" + }, + "name": { + "type": "string", + "title": "Name is the name of resource being referenced" + } + }, + "description": "TypedLocalObjectReference contains enough information to let you locate the\ntyped referenced object inside the same namespace." + }, + "v1Volume": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Volume's name.\nMust be a DNS_LABEL and unique within the pod.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + }, + "volumeSource": { + "$ref": "#/definitions/v1VolumeSource", + "description": "VolumeSource represents the location and type of the mounted volume.\nIf not specified, the Volume is implied to be an EmptyDir.\nThis implied behavior is deprecated and will be removed in a future version." + } + }, + "description": "Volume represents a named volume in a pod that may be accessed by any container in the pod." + }, + "v1VolumeDevice": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name must match the name of a persistentVolumeClaim in the pod" + }, + "devicePath": { + "type": "string", + "description": "devicePath is the path inside of the container that the device will be mapped to." + } + }, + "description": "volumeDevice describes a mapping of a raw block device within a container." + }, + "v1VolumeMount": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "This must match the Name of a Volume." + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false.\n+optional" + }, + "mountPath": { + "type": "string", + "description": "Path within the container at which the volume should be mounted. Must\nnot contain ':'." + }, + "subPath": { + "type": "string", + "title": "Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root).\n+optional" + }, + "mountPropagation": { + "type": "string", + "title": "mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10.\n+optional" + }, + "subPathExpr": { + "type": "string", + "title": "Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive.\nThis field is alpha in 1.14.\n+optional" + } + }, + "description": "VolumeMount describes a mounting of a Volume within a container." 
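The v1Volume / v1VolumeMount pairing is by name: the pod declares the volume once and each container mounts it by that name. A sketch, assuming the k8s.io/api/core/v1 types, with illustrative names:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        // The pod-level volume declaration...
        vol := corev1.Volume{
            Name:         "workdir",
            VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
        }
        // ...and the container-level mount that refers back to it.
        mount := corev1.VolumeMount{Name: "workdir", MountPath: "/work"}
        fmt.Println(vol.Name == mount.Name) // true: this name match is the whole linkage
    }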
+ }, + "v1VolumeProjection": { + "type": "object", + "properties": { + "secret": { + "$ref": "#/definitions/v1SecretProjection", + "title": "information about the secret data to project\n+optional" + }, + "downwardAPI": { + "$ref": "#/definitions/v1DownwardAPIProjection", + "title": "information about the downwardAPI data to project\n+optional" + }, + "configMap": { + "$ref": "#/definitions/v1ConfigMapProjection", + "title": "information about the configMap data to project\n+optional" + }, + "serviceAccountToken": { + "$ref": "#/definitions/v1ServiceAccountTokenProjection", + "title": "information about the serviceAccountToken data to project\n+optional" + } + }, + "title": "Projection that may be projected along with other supported volume types" + }, + "v1VolumeSource": { + "type": "object", + "properties": { + "hostPath": { + "$ref": "#/definitions/v1HostPathVolumeSource", + "title": "HostPath represents a pre-existing file or directory on the host\nmachine that is directly exposed to the container. This is generally\nused for system agents or other privileged things that are allowed\nto see the host machine. Most containers will NOT need this.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n---\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\nmount host directories as read/write.\n+optional" + }, + "emptyDir": { + "$ref": "#/definitions/v1EmptyDirVolumeSource", + "title": "EmptyDir represents a temporary directory that shares a pod's lifetime.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\n+optional" + }, + "gcePersistentDisk": { + "$ref": "#/definitions/v1GCEPersistentDiskVolumeSource", + "title": "GCEPersistentDisk represents a GCE Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" + }, + "awsElasticBlockStore": { + "$ref": "#/definitions/v1AWSElasticBlockStoreVolumeSource", + "title": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional" + }, + "gitRepo": { + "$ref": "#/definitions/v1GitRepoVolumeSource", + "title": "GitRepo represents a git repository at a particular revision.\nDEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container.\n+optional" + }, + "secret": { + "$ref": "#/definitions/v1SecretVolumeSource", + "title": "Secret represents a secret that should populate this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional" + }, + "nfs": { + "$ref": "#/definitions/v1NFSVolumeSource", + "title": "NFS represents an NFS mount on the host that shares a pod's lifetime\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional" + }, + "iscsi": { + "$ref": "#/definitions/v1ISCSIVolumeSource", + "title": "ISCSI represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md\n+optional" + }, + "glusterfs": { + "$ref": "#/definitions/v1GlusterfsVolumeSource", + "title": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md\n+optional" + }, + "persistentVolumeClaim": { + "$ref": "#/definitions/v1PersistentVolumeClaimVolumeSource", + "title": "PersistentVolumeClaimVolumeSource represents a reference to a\nPersistentVolumeClaim in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional" + }, + "rbd": { + "$ref": "#/definitions/v1RBDVolumeSource", + "title": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md\n+optional" + }, + "flexVolume": { + "$ref": "#/definitions/v1FlexVolumeSource", + "title": "FlexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin.\n+optional" + }, + "cinder": { + "$ref": "#/definitions/v1CinderVolumeSource", + "title": "Cinder represents a cinder volume attached and mounted on kubelets host machine\nMore info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md\n+optional" + }, + "cephfs": { + "$ref": "#/definitions/v1CephFSVolumeSource", + "title": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime\n+optional" + }, + "flocker": { + "$ref": "#/definitions/v1FlockerVolumeSource", + "title": "Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running\n+optional" + }, + "downwardAPI": { + "$ref": "#/definitions/v1DownwardAPIVolumeSource", + "title": "DownwardAPI represents downward API about the pod that should populate this volume\n+optional" + }, + "fc": { + "$ref": "#/definitions/v1FCVolumeSource", + "title": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.\n+optional" + }, + "azureFile": { + "$ref": "#/definitions/v1AzureFileVolumeSource", + "title": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.\n+optional" + }, + "configMap": { + "$ref": "#/definitions/v1ConfigMapVolumeSource", + "title": "ConfigMap represents a configMap that should populate this volume\n+optional" + }, + "vsphereVolume": { + "$ref": "#/definitions/v1VsphereVirtualDiskVolumeSource", + "title": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine\n+optional" + }, + "quobyte": { + "$ref": "#/definitions/v1QuobyteVolumeSource", + "title": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime\n+optional" + }, + "azureDisk": { + "$ref": "#/definitions/v1AzureDiskVolumeSource", + "title": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.\n+optional" + }, + "photonPersistentDisk": { + "$ref": "#/definitions/v1PhotonPersistentDiskVolumeSource", + "title": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine" + }, + "projected": { + "$ref": "#/definitions/v1ProjectedVolumeSource", + "title": "Items for all in one resources secrets, configmaps, and downward API" + }, + "portworxVolume": { + "$ref": "#/definitions/v1PortworxVolumeSource", + "title": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine\n+optional" + }, + "scaleIO": { + "$ref": "#/definitions/v1ScaleIOVolumeSource", + "title": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.\n+optional" + }, + "storageos": { + "$ref": "#/definitions/v1StorageOSVolumeSource", + "title": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.\n+optional" + }, + "csi": { + "$ref": "#/definitions/v1CSIVolumeSource", + "title": "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).\n+optional" + } + }, + "description": "Represents the source of a volume to mount.\nOnly one of its members may be specified." + }, + "v1VsphereVirtualDiskVolumeSource": { + "type": "object", + "properties": { + "volumePath": { + "type": "string", + "title": "Path that identifies vSphere volume vmdk" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional" + }, + "storagePolicyName": { + "type": "string", + "title": "Storage Policy Based Management (SPBM) profile name.\n+optional" + }, + "storagePolicyID": { + "type": "string", + "title": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.\n+optional" + } + }, + "description": "Represents a vSphere volume resource." 
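As the v1VolumeSource description says, only one of its members may be specified, so it behaves as a union. A sketch, assuming k8s.io/api/core/v1, of a volume backed by exactly one member (a PVC; the claim name is illustrative):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        // Exactly one member of the VolumeSource union is set; here the
        // volume is backed by the claim from the earlier sketch.
        vol := corev1.Volume{
            Name: "data",
            VolumeSource: corev1.VolumeSource{
                PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
                    ClaimName: "workdir",
                    ReadOnly:  false,
                },
            },
        }
        fmt.Println(vol.Name, vol.VolumeSource.PersistentVolumeClaim.ClaimName)
    }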
+ }, + "v1WeightedPodAffinityTerm": { + "type": "object", + "properties": { + "weight": { + "type": "integer", + "format": "int32", + "description": "weight associated with matching the corresponding podAffinityTerm,\nin the range 1-100." + }, + "podAffinityTerm": { + "$ref": "#/definitions/v1PodAffinityTerm", + "description": "Required. A pod affinity term, associated with the corresponding weight." + } + }, + "title": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)" + }, + "v1alpha1ArchiveStrategy": { + "type": "object", + "properties": { + "tar": { + "$ref": "#/definitions/v1alpha1TarStrategy" + }, + "none": { + "$ref": "#/definitions/v1alpha1NoneStrategy" + } + }, + "title": "ArchiveStrategy describes how to archive files/directory when saving artifacts" + }, + "v1alpha1Arguments": { + "type": "object", + "properties": { + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Parameter" + }, + "title": "Parameters is the list of parameters to pass to the template or workflow" + }, + "artifacts": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Artifact" + }, + "title": "Artifacts is the list of artifacts to pass to the template or workflow" + } + }, + "title": "Arguments to a template" + }, + "v1alpha1Artifact": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "name of the artifact. must be unique within a template's inputs/outputs." + }, + "path": { + "type": "string", + "title": "Path is the container path to the artifact" + }, + "mode": { + "type": "integer", + "format": "int32", + "description": "mode bits to use on this file, must be a value between 0 and 0777\nset when loading input artifacts." + }, + "from": { + "type": "string", + "title": "From allows an artifact to reference an artifact from a previous step" + }, + "artifactLocation": { + "$ref": "#/definitions/v1alpha1ArtifactLocation", + "title": "ArtifactLocation contains the location of the artifact" + }, + "globalName": { + "type": "string", + "title": "GlobalName exports an output artifact to the global scope, making it available as\n'{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts" + }, + "archive": { + "$ref": "#/definitions/v1alpha1ArchiveStrategy", + "description": "Archive controls how the artifact will be saved to the artifact repository." 
+ }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Make Artifacts optional, so the workflow does not fail if the Artifact is not generated or does not exist" + } + }, + "title": "Artifact indicates an artifact to place at a specified path" + }, + "v1alpha1ArtifactLocation": { + "type": "object", + "properties": { + "archiveLogs": { + "type": "boolean", + "format": "boolean", + "title": "ArchiveLogs indicates if the container logs should be archived" + }, + "s3": { + "$ref": "#/definitions/v1alpha1S3Artifact", + "title": "S3 contains S3 artifact location details" + }, + "git": { + "$ref": "#/definitions/v1alpha1GitArtifact", + "title": "Git contains git artifact location details" + }, + "http": { + "$ref": "#/definitions/v1alpha1HTTPArtifact", + "title": "HTTP contains HTTP artifact location details" + }, + "artifactory": { + "$ref": "#/definitions/v1alpha1ArtifactoryArtifact", + "title": "Artifactory contains artifactory artifact location details" + }, + "hdfs": { + "$ref": "#/definitions/v1alpha1HDFSArtifact", + "title": "HDFS contains HDFS artifact location details" + }, + "raw": { + "$ref": "#/definitions/v1alpha1RawArtifact", + "title": "Raw contains raw artifact location details" + } + }, + "description": "ArtifactLocation describes a location for a single or multiple artifacts.\nIt is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname).\nIt is also used to describe the location of multiple artifacts such as the archive location\nof a single workflow step, which the executor will use as a default location to store its files." + }, + "v1alpha1ArtifactRepositoryRef": { + "type": "object", + "properties": { + "configMap": { + "type": "string" + }, + "key": { + "type": "string" + } + } + }, + "v1alpha1ArtifactoryArtifact": { + "type": "object", + "properties": { + "url": { + "type": "string", + "title": "URL of the artifact" + }, + "artifactoryAuth": { + "$ref": "#/definitions/v1alpha1ArtifactoryAuth" + } + }, + "title": "ArtifactoryArtifact is the location of an artifactory artifact" + }, + "v1alpha1ArtifactoryAuth": { + "type": "object", + "properties": { + "usernameSecret": { + "$ref": "#/definitions/v1SecretKeySelector", + "title": "UsernameSecret is the secret selector to the repository username" + }, + "passwordSecret": { + "$ref": "#/definitions/v1SecretKeySelector", + "title": "PasswordSecret is the secret selector to the repository password" + } + }, + "title": "ArtifactoryAuth describes the secret selectors required for authenticating to artifactory" + }, + "v1alpha1ContinueOn": { + "type": "object", + "properties": { + "error": { + "type": "boolean", + "format": "boolean", + "title": "+optional" + }, + "failed": { + "type": "boolean", + "format": "boolean", + "title": "+optional" + } + }, + "description": "ContinueOn defines if a workflow should continue even if a task or step fails/errors.\nIt can specify whether the workflow should continue when the pod errors, fails, or both." + }, + "v1alpha1DAGTask": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name is the name of the target" + }, + "template": { + "type": "string", + "title": "Name of template to execute" + }, + "arguments": { + "$ref": "#/definitions/v1alpha1Arguments", + "title": "Arguments are the parameter and artifact arguments to the template" + }, + "templateRef": { + "$ref": "#/definitions/v1alpha1TemplateRef", + "description": "TemplateRef is the reference to the template resource to execute." 
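A hedged sketch of the v1alpha1Artifact / v1alpha1ArtifactLocation / v1alpha1ArchiveStrategy shapes, assuming the generated Go types in pkg/apis/workflow/v1alpha1; the artifact name, path, and S3 key are made up:

    package main

    import (
        "fmt"

        wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
    )

    func main() {
        // An output artifact taken from /tmp/result.txt, stored at an S3
        // key, with archiving disabled via ArchiveStrategy.None.
        art := wfv1.Artifact{
            Name: "result",
            Path: "/tmp/result.txt",
            ArtifactLocation: wfv1.ArtifactLocation{
                S3: &wfv1.S3Artifact{Key: "results/result.txt"}, // bucket/endpoint omitted for brevity
            },
            Archive: &wfv1.ArchiveStrategy{None: &wfv1.NoneStrategy{}},
        }
        fmt.Println(art.Name, art.Path)
    }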
+        },
+        "dependencies": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          },
+          "title": "Dependencies are names of other targets which this depends on"
+        },
+        "withItems": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1alpha1Item"
+          },
+          "title": "WithItems expands a task into multiple parallel tasks from the items in the list"
+        },
+        "withParam": {
+          "type": "string",
+          "description": "WithParam expands a task into multiple parallel tasks from the value in the parameter,\nwhich is expected to be a JSON list."
+        },
+        "withSequence": {
+          "$ref": "#/definitions/v1alpha1Sequence",
+          "title": "WithSequence expands a task into a numeric sequence"
+        },
+        "when": {
+          "type": "string",
+          "title": "When is an expression in which the task should conditionally execute"
+        },
+        "continueOn": {
+          "$ref": "#/definitions/v1alpha1ContinueOn",
+          "title": "ContinueOn makes argo proceed with the following step even if this step fails.\nErrors and Failed states can be specified"
+        }
+      },
+      "title": "DAGTask represents a node in the graph during DAG execution"
+    },
+    "v1alpha1DAGTemplate": {
+      "type": "object",
+      "properties": {
+        "target": {
+          "type": "string",
+          "title": "Target is one or more names of targets to execute in a DAG"
+        },
+        "tasks": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1alpha1DAGTask"
+          },
+          "title": "Tasks are a list of DAG tasks"
+        },
+        "failFast": {
+          "type": "boolean",
+          "format": "boolean",
+          "title": "This flag is for DAG logic. The DAG logic has a built-in \"fail fast\" feature to stop scheduling new steps,\nas soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed\nbefore failing the DAG itself.\nThe FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to\ncompletion (either success or failure), regardless of the failed outcomes of branches in the DAG.\nMore info and example about this feature at https://github.com/argoproj/argo/issues/1442"
+        }
+      },
+      "title": "DAGTemplate is a template subtype for directed acyclic graph templates"
+    },
+    "v1alpha1GitArtifact": {
+      "type": "object",
+      "properties": {
+        "repo": {
+          "type": "string",
+          "title": "Repo is the git repository"
+        },
+        "revision": {
+          "type": "string",
+          "title": "Revision is the git commit, tag, or branch to checkout"
+        },
+        "depth": {
+          "type": "string",
+          "format": "uint64",
+          "title": "Depth specifies clones/fetches should be shallow and include the given\nnumber of commits from the branch tip"
+        },
+        "fetch": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          },
+          "title": "Fetch specifies a number of refs that should be fetched before checkout"
+        },
+        "usernameSecret": {
+          "$ref": "#/definitions/v1SecretKeySelector",
+          "title": "UsernameSecret is the secret selector to the repository username"
+        },
+        "passwordSecret": {
+          "$ref": "#/definitions/v1SecretKeySelector",
+          "title": "PasswordSecret is the secret selector to the repository password"
+        },
+        "sshPrivateKeySecret": {
+          "$ref": "#/definitions/v1SecretKeySelector",
+          "title": "SSHPrivateKeySecret is the secret selector to the repository ssh private key"
+        },
+        "insecureIgnoreHostKey": {
+          "type": "boolean",
+          "format": "boolean",
+          "title": "InsecureIgnoreHostKey disables SSH strict host key checking during git clone"
+        }
+      },
+      "title": "GitArtifact is the location of a git artifact"
+    },
+    "v1alpha1HDFSArtifact": {
+      "type": "object",
+      "properties": {
+        "hDFSConfig": {
+          "$ref": "#/definitions/v1alpha1HDFSConfig"
+        },
"path": { + "type": "string", + "title": "Path is a file path in HDFS" + }, + "force": { + "type": "boolean", + "format": "boolean", + "title": "Force copies a file forcibly even if it exists (default: false)" + } + }, + "title": "HDFSArtifact is the location of an HDFS artifact" + }, + "v1alpha1HDFSConfig": { + "type": "object", + "properties": { + "hDFSKrbConfig": { + "$ref": "#/definitions/v1alpha1HDFSKrbConfig" + }, + "addresses": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Addresses is accessible addresses of HDFS name nodes" + }, + "hdfsUser": { + "type": "string", + "description": "HDFSUser is the user to access HDFS file system.\nIt is ignored if either ccache or keytab is used." + } + }, + "title": "HDFSConfig is configurations for HDFS" + }, + "v1alpha1HDFSKrbConfig": { + "type": "object", + "properties": { + "krbCCacheSecret": { + "$ref": "#/definitions/v1SecretKeySelector", + "description": "KrbCCacheSecret is the secret selector for Kerberos ccache\nEither ccache or keytab can be set to use Kerberos." + }, + "krbKeytabSecret": { + "$ref": "#/definitions/v1SecretKeySelector", + "description": "KrbKeytabSecret is the secret selector for Kerberos keytab\nEither ccache or keytab can be set to use Kerberos." + }, + "krbUsername": { + "type": "string", + "description": "KrbUsername is the Kerberos username used with Kerberos keytab\nIt must be set if keytab is used." + }, + "krbRealm": { + "type": "string", + "description": "KrbRealm is the Kerberos realm used with Kerberos keytab\nIt must be set if keytab is used." + }, + "krbConfigConfigMap": { + "$ref": "#/definitions/v1ConfigMapKeySelector", + "description": "KrbConfig is the configmap selector for Kerberos config as string\nIt must be set if either ccache or keytab is used." + }, + "krbServicePrincipalName": { + "type": "string", + "description": "KrbServicePrincipalName is the principal name of Kerberos service\nIt must be set if either ccache or keytab is used." 
+        }
+      },
+      "title": "HDFSKrbConfig is the auth configuration for Kerberos"
+    },
+    "v1alpha1HTTPArtifact": {
+      "type": "object",
+      "properties": {
+        "url": {
+          "type": "string",
+          "title": "URL of the artifact"
+        }
+      },
+      "title": "HTTPArtifact allows a file served over HTTP to be placed as an input artifact in a container"
+    },
+    "v1alpha1Inputs": {
+      "type": "object",
+      "properties": {
+        "parameters": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1alpha1Parameter"
+          },
+          "title": "Parameters are a list of parameters passed as inputs"
+        },
+        "artifacts": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1alpha1Artifact"
+          },
+          "title": "Artifacts are a list of artifacts passed as inputs"
+        }
+      },
+      "title": "Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another"
+    },
+    "v1alpha1Item": {
+      "type": "object",
+      "properties": {
+        "type": {
+          "type": "string",
+          "format": "int64"
+        },
+        "numVal": {
+          "type": "string"
+        },
+        "boolVal": {
+          "type": "boolean",
+          "format": "boolean"
+        },
+        "strVal": {
+          "type": "string"
+        },
+        "mapVal": {
+          "type": "object",
+          "additionalProperties": {
+            "$ref": "#/definitions/v1alpha1ItemValue"
+          }
+        },
+        "listVal": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1alpha1ItemValue"
+          }
+        }
+      },
+      "description": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true",
+      "title": "Item expands a single workflow step into multiple parallel steps\nThe value of Item can be a map, string, bool, or number"
+    },
+    "v1alpha1ItemValue": {
+      "type": "object",
+      "properties": {
+        "type": {
+          "type": "string",
+          "format": "int64"
+        },
+        "numVal": {
+          "type": "string"
+        },
+        "boolVal": {
+          "type": "boolean",
+          "format": "boolean"
+        },
+        "strVal": {
+          "type": "string"
+        },
+        "mapVal": {
+          "type": "object",
+          "additionalProperties": {
+            "type": "string"
+          }
+        },
+        "listVal": {
+          "type": "array",
+          "items": {
+            "type": "string",
+            "format": "byte"
+          }
+        }
+      },
+      "title": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true"
+    },
+    "v1alpha1Metadata": {
+      "type": "object",
+      "properties": {
+        "annotations": {
+          "type": "object",
+          "additionalProperties": {
+            "type": "string"
+          }
+        },
+        "labels": {
+          "type": "object",
+          "additionalProperties": {
+            "type": "string"
+          }
+        }
+      },
+      "title": "Pod metadata"
+    },
+    "v1alpha1NoneStrategy": {
+      "type": "object",
+      "description": "NoneStrategy indicates to skip tar process and upload the files or directory tree as independent\nfiles. Note that if the artifact is a directory, the artifact driver must support the ability to\nsave/load the directory appropriately."
+    },
+    "v1alpha1Outputs": {
+      "type": "object",
+      "properties": {
+        "parameters": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1alpha1Parameter"
+          },
+          "title": "Parameters holds the list of output parameters produced by a step"
+        },
+        "artifacts": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1alpha1Artifact"
+          },
+          "title": "Artifacts holds the list of output artifacts produced by a step"
+        },
+        "result": {
+          "type": "string",
+          "title": "Result holds the result (stdout) of a script template"
+        }
+      },
+      "title": "Outputs hold parameters, artifacts, and results from a step"
+    },
+    "v1alpha1ParallelSteps": {
+      "type": "object",
+      "properties": {
+        "steps": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1alpha1WorkflowStep"
+          }
+        }
+      }
+    },
+    "v1alpha1Parameter": {
+      "type": "object",
+      "properties": {
+        "name": {
+          "type": "string",
+          "title": "Name is the parameter name"
+        },
+        "default": {
+          "type": "string",
+          "title": "Default is the default value to use for an input parameter if a value was not supplied"
+        },
+        "value": {
+          "type": "string",
+          "title": "Value is the literal value to use for the parameter.\nIf specified in the context of an input parameter, the value takes precedence over any passed values"
+        },
+        "valueFrom": {
+          "$ref": "#/definitions/v1alpha1ValueFrom",
+          "title": "ValueFrom is the source for the output parameter's value"
+        },
+        "globalName": {
+          "type": "string",
+          "title": "GlobalName exports an output parameter to the global scope, making it available as\n'{{workflow.outputs.parameters.XXXX}}' and in workflow.status.outputs.parameters"
+        }
+      },
+      "title": "Parameter indicates a passed string parameter to a service template with an optional default value"
+    },
+    "v1alpha1PodGC": {
+      "type": "object",
+      "properties": {
+        "strategy": {
+          "type": "string"
+        }
+      },
+      "title": "PodGC describes how to delete completed pods as they complete"
+    },
+    "v1alpha1RawArtifact": {
+      "type": "object",
+      "properties": {
+        "data": {
+          "type": "string",
+          "title": "Data is the string contents of the artifact"
+        }
+      },
+      "title": "RawArtifact allows raw string content to be placed as an artifact in a container"
+    },
+    "v1alpha1ResourceTemplate": {
+      "type": "object",
+      "properties": {
+        "action": {
+          "type": "string",
+          "title": "Action is the action to perform to the resource.\nMust be one of: get, create, apply, delete, replace, patch"
+        },
+        "mergeStrategy": {
+          "type": "string",
+          "title": "MergeStrategy is the strategy used to merge a patch. It defaults to \"strategic\"\nMust be one of: strategic, merge, json"
+        },
+        "manifest": {
+          "type": "string",
+          "title": "Manifest contains the kubernetes manifest"
+        },
+        "setOwnerReference": {
+          "type": "boolean",
+          "format": "boolean",
+          "description": "SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource."
+        },
+        "successCondition": {
+          "type": "string",
+          "title": "SuccessCondition is a label selector expression which describes the conditions\nof the k8s resource in which it is acceptable to proceed to the following step"
+        },
+        "failureCondition": {
+          "type": "string",
+          "title": "FailureCondition is a label selector expression which describes the conditions\nof the k8s resource in which the step was considered failed"
+        }
+      },
+      "title": "ResourceTemplate is a template subtype to manipulate kubernetes resources"
+    },
+    "v1alpha1RetryStrategy": {
+      "type": "object",
+      "properties": {
+        "limit": {
+          "type": "integer",
+          "format": "int32",
+          "title": "Limit is the maximum number of attempts when retrying a container"
+        }
+      },
+      "title": "RetryStrategy provides controls on how to retry a workflow step"
+    },
+    "v1alpha1S3Artifact": {
+      "type": "object",
+      "properties": {
+        "s3Bucket": {
+          "$ref": "#/definitions/v1alpha1S3Bucket"
+        },
+        "key": {
+          "type": "string",
+          "title": "Key is the key in the bucket where the artifact resides"
+        }
+      },
+      "title": "S3Artifact is the location of an S3 artifact"
+    },
+    "v1alpha1S3Bucket": {
+      "type": "object",
+      "properties": {
+        "endpoint": {
+          "type": "string",
+          "title": "Endpoint is the hostname of the bucket endpoint"
+        },
+        "bucket": {
+          "type": "string",
+          "title": "Bucket is the name of the bucket"
+        },
+        "region": {
+          "type": "string",
+          "title": "Region contains the optional bucket region"
+        },
+        "insecure": {
+          "type": "boolean",
+          "format": "boolean",
+          "title": "Insecure will connect to the service without TLS"
+        },
+        "accessKeySecret": {
+          "$ref": "#/definitions/v1SecretKeySelector",
+          "title": "AccessKeySecret is the secret selector to the bucket's access key"
+        },
+        "secretKeySecret": {
+          "$ref": "#/definitions/v1SecretKeySelector",
+          "title": "SecretKeySecret is the secret selector to the bucket's secret key"
+        }
+      },
+      "title": "S3Bucket contains the access information required for interfacing with an S3 bucket"
+    },
+    "v1alpha1ScriptTemplate": {
+      "type": "object",
+      "properties": {
+        "container": {
+          "$ref": "#/definitions/v1Container"
+        },
+        "source": {
+          "type": "string",
+          "title": "Source contains the source code of the script to execute"
+        }
+      },
+      "title": "ScriptTemplate is a template subtype to enable scripting through code steps"
+    },
+    "v1alpha1Sequence": {
+      "type": "object",
+      "properties": {
+        "count": {
+          "type": "string",
+          "title": "Count is the number of elements in the sequence (default: 0). Not to be used with end"
+        },
+        "start": {
+          "type": "string",
+          "title": "Number at which to start the sequence (default: 0)"
+        },
+        "end": {
+          "type": "string",
+          "title": "Number at which to end the sequence (default: 0). Not to be used with Count"
+        },
+        "format": {
+          "type": "string",
+          "title": "Format is a printf format string to format the value in the sequence"
+        }
+      },
+      "title": "Sequence expands a workflow step into a numeric range"
+    },
+    "v1alpha1SuspendTemplate": {
+      "type": "object",
+      "title": "SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time"
+    },
+    "v1alpha1TarStrategy": {
+      "type": "object",
+      "title": "TarStrategy will tar and gzip the file or directory when saving"
+    },
+    "v1alpha1Template": {
+      "type": "object",
+      "properties": {
+        "name": {
+          "type": "string",
+          "title": "Name is the name of the template"
+        },
+        "template": {
+          "type": "string",
+          "description": "Template is the name of the template which is used as the base of this template."
+        },
+        "arguments": {
+          "$ref": "#/definitions/v1alpha1Arguments",
+          "description": "Arguments hold arguments to the template."
+        },
+        "templateRef": {
+          "$ref": "#/definitions/v1alpha1TemplateRef",
+          "description": "TemplateRef is the reference to the template resource which is used as the base of this template."
+        },
+        "inputs": {
+          "$ref": "#/definitions/v1alpha1Inputs",
+          "title": "Inputs describe what inputs parameters and artifacts are supplied to this template"
+        },
+        "outputs": {
+          "$ref": "#/definitions/v1alpha1Outputs",
+          "title": "Outputs describe the parameters and artifacts that this template produces"
+        },
+        "nodeSelector": {
+          "type": "object",
+          "additionalProperties": {
+            "type": "string"
+          },
+          "description": "NodeSelector is a selector to schedule this step of the workflow to be\nrun on the selected node(s). Overrides the selector set at the workflow level."
+        },
+        "affinity": {
+          "$ref": "#/definitions/v1Affinity",
+          "title": "Affinity sets the pod's scheduling constraints\nOverrides the affinity set at the workflow level (if any)"
+        },
+        "metadata": {
+          "$ref": "#/definitions/v1alpha1Metadata",
+          "title": "Metadata sets the pod's metadata, i.e. annotations and labels"
+        },
+        "daemon": {
+          "type": "boolean",
+          "format": "boolean",
+          "title": "Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness"
+        },
+        "steps": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1alpha1ParallelSteps"
+          },
+          "title": "Steps define a series of sequential/parallel workflow steps"
+        },
+        "container": {
+          "$ref": "#/definitions/v1Container",
+          "title": "Container is the main container image to run in the pod"
+        },
+        "script": {
+          "$ref": "#/definitions/v1alpha1ScriptTemplate",
+          "title": "Script runs a portion of code against an interpreter"
+        },
+        "resource": {
+          "$ref": "#/definitions/v1alpha1ResourceTemplate",
+          "title": "Resource template subtype which can run k8s resources"
+        },
+        "dag": {
+          "$ref": "#/definitions/v1alpha1DAGTemplate",
+          "title": "DAG template subtype which runs a DAG"
+        },
+        "suspend": {
+          "$ref": "#/definitions/v1alpha1SuspendTemplate",
+          "title": "Suspend template subtype which can suspend a workflow when reaching the step"
+        },
+        "volumes": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1Volume"
+          },
+          "description": "Volumes is a list of volumes that can be mounted by containers in a template."
+        },
+        "initContainers": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1alpha1UserContainer"
+          },
+          "description": "InitContainers is a list of containers which run before the main container."
+        },
+        "sidecars": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1alpha1UserContainer"
+          },
+          "title": "Sidecars is a list of containers which run alongside the main container\nSidecars are automatically killed when the main container completes"
+        },
+        "archiveLocation": {
+          "$ref": "#/definitions/v1alpha1ArtifactLocation",
+          "description": "Location in which all files related to the step will be stored (logs, artifacts, etc...).\nCan be overridden by individual items in Outputs. If omitted, will use the default\nartifact repository location configured in the controller, appended with the\n\u003cworkflowname\u003e/\u003cnodename\u003e in the key."
+ }, + "activeDeadlineSeconds": { + "type": "string", + "format": "int64", + "description": "Optional duration in seconds relative to the StartTime that the pod may be active on a node\nbefore the system actively tries to terminate the pod; value must be positive integer\nThis field is only applicable to container and script templates." + }, + "retryStrategy": { + "$ref": "#/definitions/v1alpha1RetryStrategy", + "title": "RetryStrategy describes how to retry a template when it fails" + }, + "parallelism": { + "type": "string", + "format": "int64", + "description": "Parallelism limits the max total parallel pods that can execute at the same time within the\nboundaries of this template invocation. If additional steps/dag templates are invoked, the\npods created by those templates will not be counted towards this total." + }, + "tolerations": { + "type": "array", + "items": { + "$ref": "#/definitions/v1Toleration" + }, + "description": "Tolerations to apply to workflow pods." + }, + "schedulerName": { + "type": "string", + "title": "If specified, the pod will be dispatched by specified scheduler.\nOr it will be dispatched by workflow scope scheduler if specified.\nIf neither specified, the pod will be dispatched by default scheduler.\n+optional" + }, + "priorityClassName": { + "type": "string", + "description": "PriorityClassName to apply to workflow pods." + }, + "priority": { + "type": "integer", + "format": "int32", + "description": "Priority to apply to workflow pods." + }, + "serviceAccountName": { + "type": "string", + "title": "ServiceAccountName to apply to workflow pods" + }, + "hostAliases": { + "type": "array", + "items": { + "$ref": "#/definitions/v1HostAlias" + }, + "title": "HostAliases is an optional list of hosts and IPs that will be injected into the pod spec" + }, + "securityContext": { + "$ref": "#/definitions/v1PodSecurityContext", + "title": "SecurityContext holds pod-level security attributes and common container settings.\nOptional: Defaults to empty. See type description for default values of each field.\n+optional" + } + }, + "title": "Template is a reusable and composable unit of execution in a workflow" + }, + "v1alpha1TemplateRef": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name is the resource name of the template." + }, + "template": { + "type": "string", + "description": "Template is the name of referred template in the resource." + }, + "runtimeResolution": { + "type": "boolean", + "format": "boolean", + "description": "RuntimeResolution skips validation at creation time.\nBy enabling this option, you can create the referred workflow template before the actual runtime." + } + }, + "description": "TemplateRef is a reference of template resource." + }, + "v1alpha1UserContainer": { + "type": "object", + "properties": { + "container": { + "$ref": "#/definitions/v1Container" + }, + "mirrorVolumeMounts": { + "type": "boolean", + "format": "boolean", + "title": "MirrorVolumeMounts will mount the same volumes specified in the main container\nto the container (including artifacts), at the same mountPaths. This enables\ndind daemon to partially see the same filesystem as the main container in\norder to use features such as docker volume binding" + } + }, + "description": "UserContainer is a container specified by a user." 
+ }, + "v1alpha1ValueFrom": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Path in the container to retrieve an output parameter value from in container templates" + }, + "jsonPath": { + "type": "string", + "title": "JSONPath of a resource to retrieve an output parameter value from in resource templates" + }, + "jqFilter": { + "type": "string", + "title": "JQFilter expression against the resource object in resource templates" + }, + "parameter": { + "type": "string", + "title": "Parameter reference to a step or dag task in which to retrieve an output parameter value from\n(e.g. '{{steps.mystep.outputs.myparam}}')" + } + }, + "title": "ValueFrom describes a location in which to obtain the value to a parameter" + }, + "v1alpha1Workflow": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/definitions/v1ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/v1alpha1WorkflowSpec" + }, + "status": { + "$ref": "#/definitions/v1alpha1WorkflowStatus" + } + }, + "title": "Workflow is the definition of a workflow resource\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object" + }, + "v1alpha1WorkflowSpec": { + "type": "object", + "properties": { + "templates": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Template" + }, + "title": "Templates is a list of workflow templates used in a workflow" + }, + "entrypoint": { + "type": "string", + "title": "Entrypoint is a template reference to the starting point of the workflow" + }, + "arguments": { + "$ref": "#/definitions/v1alpha1Arguments", + "title": "Arguments contain the parameters and artifacts sent to the workflow entrypoint\nParameters are referencable globally using the 'workflow' variable prefix.\ne.g. {{workflow.parameters.myparam}}" + }, + "serviceAccountName": { + "type": "string", + "description": "ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as." + }, + "volumes": { + "type": "array", + "items": { + "$ref": "#/definitions/v1Volume" + }, + "description": "Volumes is a list of volumes that can be mounted by containers in a workflow." + }, + "volumeClaimTemplates": { + "type": "array", + "items": { + "$ref": "#/definitions/v1PersistentVolumeClaim" + }, + "title": "VolumeClaimTemplates is a list of claims that containers are allowed to reference.\nThe Workflow controller will create the claims at the beginning of the workflow\nand delete the claims upon completion of the workflow" + }, + "parallelism": { + "type": "string", + "format": "int64", + "title": "Parallelism limits the max total parallel pods that can execute at the same time in a workflow" + }, + "artifactRepositoryRef": { + "$ref": "#/definitions/v1alpha1ArtifactRepositoryRef", + "description": "ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config." + }, + "suspend": { + "type": "boolean", + "format": "boolean", + "title": "Suspend will suspend the workflow and prevent execution of any future steps in the workflow" + }, + "nodeSelector": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "NodeSelector is a selector which will result in all pods of the workflow\nto be scheduled on the selected node(s). This is able to be overridden by\na nodeSelector specified in the template." 
+        },
+        "affinity": {
+          "$ref": "#/definitions/v1Affinity",
+          "title": "Affinity sets the scheduling constraints for all pods in the workflow.\nCan be overridden by an affinity specified in the template"
+        },
+        "tolerations": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1Toleration"
+          },
+          "description": "Tolerations to apply to workflow pods."
+        },
+        "imagePullSecrets": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1LocalObjectReference"
+          },
+          "title": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images\nin pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets\ncan be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.\nMore info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod"
+        },
+        "hostNetwork": {
+          "type": "boolean",
+          "format": "boolean",
+          "description": "Host networking requested for this workflow pod. Defaults to false."
+        },
+        "dnsPolicy": {
+          "type": "string",
+          "description": "Set DNS policy for the pod.\nDefaults to \"ClusterFirst\".\nValid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.\nDNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.\nTo have DNS options set along with hostNetwork, you have to specify DNS policy\nexplicitly to 'ClusterFirstWithHostNet'."
+        },
+        "dnsConfig": {
+          "$ref": "#/definitions/v1PodDNSConfig",
+          "description": "PodDNSConfig defines the DNS parameters of a pod in addition to\nthose generated from DNSPolicy."
+        },
+        "onExit": {
+          "type": "string",
+          "description": "OnExit is a template reference which is invoked at the end of the\nworkflow, irrespective of the success, failure, or error of the\nprimary workflow."
+        },
+        "ttlSecondsAfterFinished": {
+          "type": "integer",
+          "format": "int32",
+          "description": "TTLSecondsAfterFinished limits the lifetime of a Workflow that has finished execution\n(Succeeded, Failed, Error). If this field is set, once the Workflow finishes, it will be\ndeleted after ttlSecondsAfterFinished expires. If this field is unset,\nttlSecondsAfterFinished will not expire. If this field is set to zero,\nttlSecondsAfterFinished expires immediately after the Workflow finishes."
+        },
+        "activeDeadlineSeconds": {
+          "type": "string",
+          "format": "int64",
+          "title": "Optional duration in seconds relative to the workflow start time which the workflow is\nallowed to run before the controller terminates the workflow. A value of zero is used to\nterminate a Running workflow"
+        },
+        "priority": {
+          "type": "integer",
+          "format": "int32",
+          "description": "Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first."
+        },
+        "schedulerName": {
+          "type": "string",
+          "title": "Set scheduler name for all pods.\nWill be overridden if container/script template's scheduler name is set.\nDefault scheduler will be used if neither specified.\n+optional"
+        },
+        "podGC": {
+          "$ref": "#/definitions/v1alpha1PodGC",
+          "title": "PodGC describes the strategy to use when deleting completed pods"
+        },
+        "podPriorityClassName": {
+          "type": "string",
+          "description": "PriorityClassName to apply to workflow pods."
+        },
+        "podPriority": {
+          "type": "integer",
+          "format": "int32",
+          "description": "Priority to apply to workflow pods."
+ }, + "hostAliases": { + "type": "array", + "items": { + "$ref": "#/definitions/v1HostAlias" + }, + "title": "HostAliases is an optional list of hosts and IPs that will be injected into the pod spec" + }, + "securityContext": { + "$ref": "#/definitions/v1PodSecurityContext", + "title": "SecurityContext holds pod-level security attributes and common container settings.\nOptional: Defaults to empty. See type description for default values of each field.\n+optional" + } + }, + "description": "WorkflowSpec is the specification of a Workflow." + }, + "v1alpha1WorkflowStatus": { + "type": "object", + "properties": { + "phase": { + "type": "string", + "description": "Phase a simple, high-level summary of where the workflow is in its lifecycle." + }, + "startedAt": { + "$ref": "#/definitions/v1Time", + "title": "Time at which this workflow started" + }, + "finishedAt": { + "$ref": "#/definitions/v1Time", + "title": "Time at which this workflow completed" + }, + "message": { + "type": "string", + "description": "A human readable message indicating details about why the workflow is in this condition." + }, + "compressedNodes": { + "type": "string", + "title": "Compressed and base64 decoded Nodes map" + }, + "nodes": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/workflowv1alpha1NodeStatus" + }, + "description": "Nodes is a mapping between a node ID and the node's status." + }, + "persistentVolumeClaims": { + "type": "array", + "items": { + "$ref": "#/definitions/v1Volume" + }, + "description": "PersistentVolumeClaims tracks all PVCs that were created as part of the workflow.\nThe contents of this list are drained at the end of the workflow." + }, + "outputs": { + "$ref": "#/definitions/v1alpha1Outputs", + "title": "Outputs captures output values and artifact locations produced by the workflow via global outputs" + } + }, + "title": "WorkflowStatus contains overall status information about a workflow\n+k8s:openapi-gen=false" + }, + "v1alpha1WorkflowStep": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of the step" + }, + "template": { + "type": "string", + "title": "Template is the name of the template to execute as the step" + }, + "arguments": { + "$ref": "#/definitions/v1alpha1Arguments", + "title": "Arguments hold arguments to the template" + }, + "templateRef": { + "$ref": "#/definitions/v1alpha1TemplateRef", + "description": "TemplateRef is the reference to the template resource to execute as the step." + }, + "withItems": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Item" + }, + "title": "WithItems expands a step into multiple parallel steps from the items in the list" + }, + "withParam": { + "type": "string", + "description": "WithParam expands a step into multiple parallel steps from the value in the parameter,\nwhich is expected to be a JSON list." 
+        },
+        "withSequence": {
+          "$ref": "#/definitions/v1alpha1Sequence",
+          "title": "WithSequence expands a step into a numeric sequence"
+        },
+        "when": {
+          "type": "string",
+          "title": "When is an expression in which the step should conditionally execute"
+        },
+        "continueOn": {
+          "$ref": "#/definitions/v1alpha1ContinueOn",
+          "title": "ContinueOn makes argo proceed with the following step even if this step fails.\nErrors and Failed states can be specified"
+        }
+      },
+      "title": "WorkflowStep is a reference to a template to execute in a series of steps"
+    },
+    "workflowWorkflowCreateResponse": {
+      "type": "object",
+      "properties": {
+        "response": {
+          "type": "string"
+        }
+      }
+    },
+    "workflowWorkflowListResponse": {
+      "type": "object",
+      "properties": {
+        "workflows": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/v1alpha1Workflow"
+          }
+        }
+      }
+    },
+    "workflowWorkflowResponse": {
+      "type": "object",
+      "properties": {
+        "workflows": {
+          "$ref": "#/definitions/v1alpha1Workflow"
+        }
+      }
+    },
+    "workflowv1alpha1NodeStatus": {
+      "type": "object",
+      "properties": {
+        "id": {
+          "type": "string",
+          "title": "ID is a unique identifier of a node within the workflow\nIt is implemented as a hash of the node name, which makes the ID deterministic"
+        },
+        "name": {
+          "type": "string",
+          "title": "Name is unique name in the node tree used to generate the node ID"
+        },
+        "displayName": {
+          "type": "string",
+          "title": "DisplayName is a human readable representation of the node. Unique within a template boundary"
+        },
+        "type": {
+          "type": "string",
+          "title": "Type indicates type of node"
+        },
+        "templateName": {
+          "type": "string",
+          "title": "TemplateName is the template name which this node corresponds to.\nNot applicable to virtual nodes (e.g. Retry, StepGroup)"
+        },
+        "templateRef": {
+          "$ref": "#/definitions/v1alpha1TemplateRef",
+          "title": "TemplateRef is the reference to the template resource which this node corresponds to.\nNot applicable to virtual nodes (e.g. Retry, StepGroup)"
+        },
+        "phase": {
+          "type": "string",
+          "description": "Phase a simple, high-level summary of where the node is in its lifecycle.\nCan be used as a state machine."
+        },
+        "boundaryID": {
+          "type": "string",
+          "title": "BoundaryID indicates the node ID of the associated template root node in which this node belongs to"
+        },
+        "message": {
+          "type": "string",
+          "description": "A human readable message indicating details about why the node is in this condition."
+        },
+        "startedAt": {
+          "$ref": "#/definitions/v1Time",
+          "title": "Time at which this node started"
+        },
+        "finishedAt": {
+          "$ref": "#/definitions/v1Time",
+          "title": "Time at which this node completed"
+        },
+        "podIP": {
+          "type": "string",
+          "title": "PodIP captures the IP of the pod for daemoned steps"
+        },
+        "daemoned": {
+          "type": "boolean",
+          "format": "boolean",
+          "title": "Daemoned tracks whether or not this node was daemoned and needs to be terminated"
+        },
+        "inputs": {
+          "$ref": "#/definitions/v1alpha1Inputs",
+          "title": "Inputs captures input parameter values and artifact locations supplied to this template invocation"
+        },
+        "outputs": {
+          "$ref": "#/definitions/v1alpha1Outputs",
+          "title": "Outputs captures output parameter values and artifact locations produced by this template invocation"
+        },
+        "children": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          },
+          "title": "Children is a list of child node IDs"
+        },
+        "outboundNodes": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          },
+          "description": "OutboundNodes tracks the node IDs which are considered \"outbound\" nodes to a template invocation.\nFor every invocation of a template, there are nodes which we consider \"outbound\". Essentially,\nthese are the last nodes in the execution sequence to run, before the template is considered completed.\nThese nodes are then connected as parents to a following step.\n\nIn the case of single pod steps (i.e. container, script, resource templates), this list will be nil\nsince the pod itself is already considered the \"outbound\" node.\nIn the case of DAGs, outbound nodes are the \"target\" tasks (tasks with no children).\nIn the case of steps, outbound nodes are all the containers involved in the last step group.\nNOTE: since templates are composable, the list of outbound nodes is carried upwards when\na DAG/steps template invokes another DAG/steps template. In other words, the outbound nodes of\na template will be a superset of the outbound nodes of its last children."
+        }
+      },
+      "title": "NodeStatus contains status information about an individual node in the workflow\n+k8s:openapi-gen=false"
+    }
+  }
+}
diff --git a/cmd/server/workflow/workflow_service.go b/cmd/server/workflow/workflow_service.go
new file mode 100644
index 000000000000..2c8acc0349b7
--- /dev/null
+++ b/cmd/server/workflow/workflow_service.go
@@ -0,0 +1,46 @@
+package workflow
+
+import (
+	"golang.org/x/net/context"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
+	"github.com/argoproj/argo/pkg/client/clientset/versioned"
+)
+
+// Server implements WorkflowServiceServer on top of a workflow clientset
+// scoped to a single namespace.
+type Server struct {
+	Namespace string
+	Clientset versioned.Clientset
+}
+
+func NewServer(namespace string, clientset versioned.Clientset) WorkflowServiceServer {
+	return &Server{Namespace: namespace, Clientset: clientset}
+}
+
+func (s *Server) Create(context.Context, *v1alpha1.Workflow) (*WorkflowCreateResponse, error) {
+	return nil, nil
+}
+
+// Get returns a single workflow looked up by name.
+func (s *Server) Get(ctx context.Context, query *WorkflowQuery) (*WorkflowResponse, error) {
+	wf, err := s.Clientset.ArgoprojV1alpha1().Workflows(s.Namespace).Get(query.Name, v1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+	return &WorkflowResponse{Workflows: wf}, nil
+}
+
+// List returns all workflows in the server's namespace.
+func (s *Server) List(ctx context.Context, query *WorkflowQuery) (*WorkflowListResponse, error) {
+	wfList, err := s.Clientset.ArgoprojV1alpha1().Workflows(s.Namespace).List(v1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+	wfListItems := make([]*v1alpha1.Workflow, 0, len(wfList.Items))
+	for idx := range wfList.Items {
+		wfListItems = append(wfListItems, &wfList.Items[idx])
+	}
+	return &WorkflowListResponse{Workflows: wfListItems}, nil
+}
diff --git a/cmd/server/workflow/workflow_service_test.go b/cmd/server/workflow/workflow_service_test.go
new file mode 100644
index 000000000000..e46ee67a942c
--- /dev/null
+++ b/cmd/server/workflow/workflow_service_test.go
@@ -0,0 +1,172 @@
+package workflow
+
+import (
+	wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
+	"github.com/stretchr/testify/assert"
+	"sigs.k8s.io/yaml"
+	"testing"
+)
+
+
+var wf = `
+apiVersion: argoproj.io/v1alpha1
+kind: Workflow
+metadata:
+  creationTimestamp: "2019-09-16T22:56:45Z"
+  generateName: scripts-bash-
+  generation: 9
+  labels:
+    workflows.argoproj.io/completed: "true"
+    workflows.argoproj.io/phase: Failed
+  name: scripts-bash-5ksp4
+  namespace: default
+  resourceVersion: "1414877"
+  selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/workflows/scripts-bash-5ksp4
+  uid: 41a16c4b-d8d5-11e9-8938-025000000001
+spec:
+  arguments: {}
+  entrypoint: bash-script-example
+  templates:
+  - arguments: {}
+    inputs: {}
+    metadata: {}
+    name: bash-script-example
+    outputs: {}
+    steps:
+    - - arguments: {}
+        name: generate
+        template: gen-random-int
+    - - arguments:
+          parameters:
+          - name: message
+            value: '{{steps.generate.outputs.result}}'
+        name: print
+        template: print-message
+  - arguments: {}
+    inputs: {}
+    metadata: {}
+    name: gen-random-int
+    outputs: {}
+    script:
+      command:
+      - bash
+      image: debian:9.4
+      name: ""
+      resources: {}
+      source: |
+        cat /dev/urandom | od -N2 -An -i | awk -v f=1 -v r=100 '{printf "%i\n", f + r * $1 / 65536}'
+  - arguments: {}
+    container:
+      args:
+      - 'echo -e " apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata:
+        name: zk-pdb spec: minAvailable: 2 selector: matchLabels: workflows.argoproj.io/workflow:
+        {{workflow.name}} " | tee pdb.yaml |sleep 120|kubectl create -f pdb.yaml '
+      command:
+      - sh
+      - -c
+      image: lachlanevenson/k8s-kubectl
+      name: ""
+      resources: {}
+    inputs:
+      parameters:
+      - name: message
+    metadata: {}
+    name: print-message
+    outputs: {}
+status:
+  finishedAt: "2019-09-16T22:58:59Z"
+  message: child 'scripts-bash-5ksp4-1961198978' failed
+  nodes:
+    scripts-bash-5ksp4:
+      children:
+      - scripts-bash-5ksp4-2570590690
+      displayName: scripts-bash-5ksp4
+      finishedAt: "2019-09-16T22:58:59Z"
+      id: scripts-bash-5ksp4
+      message: child 'scripts-bash-5ksp4-1961198978' failed
+      name: scripts-bash-5ksp4
+      outboundNodes:
+      - scripts-bash-5ksp4-1961198978
+      phase: Failed
+      startedAt: "2019-09-16T22:56:45Z"
+      templateName: bash-script-example
+      type: Steps
+    scripts-bash-5ksp4-315841411:
+      boundaryID: scripts-bash-5ksp4
+      children:
+      - scripts-bash-5ksp4-3576997567
+      displayName: generate
+      finishedAt: "2019-09-16T22:56:51Z"
+      id: scripts-bash-5ksp4-315841411
+      name: scripts-bash-5ksp4[0].generate
+      outputs:
+        result: "50"
+      phase: Succeeded
+      startedAt: "2019-09-16T22:56:45Z"
+      templateName: gen-random-int
+      type: Pod
+    scripts-bash-5ksp4-1961198978:
+      boundaryID: scripts-bash-5ksp4
+      displayName: print
+      finishedAt: "2019-09-16T22:58:58Z"
+      id: scripts-bash-5ksp4-1961198978
+      inputs:
+        parameters:
+        - name: message
+          value: "50"
+      message: failed with exit code 1
+      name: scripts-bash-5ksp4[1].print
+      phase: Failed
+      startedAt: "2019-09-16T22:56:53Z"
+      templateName: print-message
+      type: Pod
+    scripts-bash-5ksp4-2570590690:
+      boundaryID: scripts-bash-5ksp4
+      children:
+      - scripts-bash-5ksp4-315841411
+      displayName: '[0]'
+      finishedAt: "2019-09-16T22:56:53Z"
+      id: scripts-bash-5ksp4-2570590690
+      name: scripts-bash-5ksp4[0]
+      phase: Succeeded
+      startedAt: "2019-09-16T22:56:45Z"
+      templateName: bash-script-example
+      type: StepGroup
+    scripts-bash-5ksp4-3576997567:
+      boundaryID: scripts-bash-5ksp4
+      children:
+      - scripts-bash-5ksp4-1961198978
+      displayName: '[1]'
+      finishedAt: "2019-09-16T22:58:59Z"
+      id: scripts-bash-5ksp4-3576997567
+      message: child 'scripts-bash-5ksp4-1961198978' failed
+      name: scripts-bash-5ksp4[1]
+      phase: Failed
+      startedAt: "2019-09-16T22:56:53Z"
+      templateName: bash-script-example
+      type: StepGroup
+  phase: Failed
+  startedAt: "2019-09-16T22:56:45Z"
+`
+
+func unmarshalWF(yamlStr string) *wfv1.Workflow {
+	var wf wfv1.Workflow
+	err := yaml.Unmarshal([]byte(yamlStr), &wf)
+	if err != nil {
+		panic(err)
+	}
+	return &wf
+}
+
+func TestMarshalling(t *testing.T) {
+	workf := unmarshalWF(wf)
+
+	wr := WorkflowResponse{Workflows: workf}
+	bytes, err := wr.Marshal()
+	assert.NoError(t, err)
+
+	wr1 := WorkflowResponse{}
+	err = wr1.Unmarshal(bytes)
+	assert.NoError(t, err)
+	assert.Equal(t, wr, wr1)
+}
diff --git a/hack/generate-proto.sh b/hack/generate-proto.sh
index b48843e5c715..6ba6c6b97b46 100755
--- a/hack/generate-proto.sh
+++ b/hack/generate-proto.sh
@@ -35,91 +35,91 @@ APIMACHINERY_PKGS=(
     k8s.io/apimachinery/pkg/apis/meta/v1
     k8s.io/api/core/v1
 )
-go-to-protobuf \
-    --go-header-file=${PROJECT_ROOT}/hack/custom-boilerplate.go.txt \
-    --packages=$(IFS=, ; echo "${PACKAGES[*]}") \
-    --apimachinery-packages=$(IFS=, ; echo "${APIMACHINERY_PKGS[*]}") \
-    --proto-import=./vendor
+#go-to-protobuf \
+#    --go-header-file=${PROJECT_ROOT}/hack/custom-boilerplate.go.txt \
+#    --packages=$(IFS=, ; echo "${PACKAGES[*]}") \
+#    --apimachinery-packages=$(IFS=, ; echo "${APIMACHINERY_PKGS[*]}") \
+#    --proto-import=./vendor
 
 # Either protoc-gen-go, protoc-gen-gofast, or protoc-gen-gogofast can be used to build
 # server/*/.pb.go from .proto files. golang/protobuf and gogo/protobuf can be used
 # interchangeably. The difference in the options are:
 # 1. protoc-gen-go - official golang/protobuf
-#go build -i -o dist/protoc-gen-go ./vendor/github.com/golang/protobuf/protoc-gen-go
-#GOPROTOBINARY=go
+go build -i -o dist/protoc-gen-go ./vendor/github.com/golang/protobuf/protoc-gen-go
+GOPROTOBINARY=go
 # 2. protoc-gen-gofast - fork of golang golang/protobuf. Faster code generation
-#go build -i -o dist/protoc-gen-gofast ./vendor/github.com/gogo/protobuf/protoc-gen-gofast
-#GOPROTOBINARY=gofast
+go build -i -o dist/protoc-gen-gofast ./vendor/github.com/gogo/protobuf/protoc-gen-gofast
+GOPROTOBINARY=gofast
 # 3. protoc-gen-gogofast - faster code generation and gogo extensions and flexibility in controlling
 # the generated go code (e.g. customizing field names, nullable fields)
-#go build -i -o dist/protoc-gen-gogofast ./vendor/github.com/gogo/protobuf/protoc-gen-gogofast
-#GOPROTOBINARY=gogofast
-#
-## protoc-gen-grpc-gateway is used to build .pb.gw.go files from from .proto files
-#go build -i -o dist/protoc-gen-grpc-gateway ./vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
-## protoc-gen-swagger is used to build swagger.json
-#go build -i -o dist/protoc-gen-swagger ./vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
-#
-## Generate server//(.pb.go|.pb.gw.go)
-#PROTO_FILES=$(find $PROJECT_ROOT \( -name "*.proto" -and -path '*/server/*' -or -path '*/reposerver/*' -and -name "*.proto" \))
-#for i in ${PROTO_FILES}; do
-#    # Path to the google API gateway annotations.proto will be different depending if we are
-#    # building natively (e.g. from workspace) vs. part of a docker build.
-#    if [ -f /.dockerenv ]; then
-#        GOOGLE_PROTO_API_PATH=$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis
-#        GOGO_PROTOBUF_PATH=$GOPATH/src/github.com/gogo/protobuf
-#    else
-#        GOOGLE_PROTO_API_PATH=${PROJECT_ROOT}/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis
-#        GOGO_PROTOBUF_PATH=${PROJECT_ROOT}/vendor/github.com/gogo/protobuf
-#    fi
-#    protoc \
-#        -I${PROJECT_ROOT} \
-#        -I/usr/local/include \
-#        -I./vendor \
-#        -I$GOPATH/src \
-#        -I${GOOGLE_PROTO_API_PATH} \
-#        -I${GOGO_PROTOBUF_PATH} \
-#        --${GOPROTOBINARY}_out=plugins=grpc:$GOPATH/src \
-#        --grpc-gateway_out=logtostderr=true:$GOPATH/src \
-#        --swagger_out=logtostderr=true:. 
\ -# $i -#done -# -## collect_swagger gathers swagger files into a subdirectory -#collect_swagger() { -# SWAGGER_ROOT="$1" -# EXPECTED_COLLISIONS="$2" -# SWAGGER_OUT="${PROJECT_ROOT}/assets/swagger.json" -# PRIMARY_SWAGGER=`mktemp` -# COMBINED_SWAGGER=`mktemp` -# -# cat < "${PRIMARY_SWAGGER}" -#{ -# "swagger": "2.0", -# "info": { -# "title": "Consolidate Services", -# "description": "Description of all APIs", -# "version": "version not set" -# }, -# "paths": {} -#} -#EOF -# -# /bin/rm -f "${SWAGGER_OUT}" -# -# /usr/bin/find "${SWAGGER_ROOT}" -name '*.swagger.json' -exec /usr/local/bin/swagger mixin -c "${EXPECTED_COLLISIONS}" "${PRIMARY_SWAGGER}" '{}' \+ > "${COMBINED_SWAGGER}" -# /usr/local/bin/jq -r 'del(.definitions[].properties[]? | select(."$ref"!=null and .description!=null).description) | del(.definitions[].properties[]? | select(."$ref"!=null and .title!=null).title)' "${COMBINED_SWAGGER}" > "${SWAGGER_OUT}" -# -# /bin/rm "${PRIMARY_SWAGGER}" "${COMBINED_SWAGGER}" -#} -# -## clean up generated swagger files (should come after collect_swagger) -#clean_swagger() { -# SWAGGER_ROOT="$1" -# /usr/bin/find "${SWAGGER_ROOT}" -name '*.swagger.json' -delete -#} -# -#collect_swagger server 21 -#clean_swagger server -#clean_swagger reposerver -#clean_swagger controller \ No newline at end of file +go build -i -o dist/protoc-gen-gogofast ./vendor/github.com/gogo/protobuf/protoc-gen-gogofast +GOPROTOBINARY=gogofast + +# protoc-gen-grpc-gateway is used to build .pb.gw.go files from from .proto files +go build -i -o dist/protoc-gen-grpc-gateway ./vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway +# protoc-gen-swagger is used to build swagger.json +go build -i -o dist/protoc-gen-swagger ./vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger + +# Generate server//(.pb.go|.pb.gw.go) +PROTO_FILES=$(find $PROJECT_ROOT \( -name "*.proto" -and -path '*/server/*' -or -path '*/reposerver/*' -and -name "*.proto" \)) +for i in ${PROTO_FILES}; do + # Path to the google API gateway annotations.proto will be different depending if we are + # building natively (e.g. from workspace) vs. part of a docker build. + if [ -f /.dockerenv ]; then + GOOGLE_PROTO_API_PATH=$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis + GOGO_PROTOBUF_PATH=$GOPATH/src/github.com/gogo/protobuf + else + GOOGLE_PROTO_API_PATH=${PROJECT_ROOT}/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis + GOGO_PROTOBUF_PATH=${PROJECT_ROOT}/vendor/github.com/gogo/protobuf + fi + protoc \ + -I${PROJECT_ROOT} \ + -I/usr/local/include \ + -I./vendor \ + -I$GOPATH/src \ + -I${GOOGLE_PROTO_API_PATH} \ + -I${GOGO_PROTOBUF_PATH} \ + --${GOPROTOBINARY}_out=plugins=grpc:$GOPATH/src \ + --grpc-gateway_out=logtostderr=true:$GOPATH/src \ + --swagger_out=logtostderr=true:. \ + $i +done + +# collect_swagger gathers swagger files into a subdirectory +collect_swagger() { + SWAGGER_ROOT="$1" + EXPECTED_COLLISIONS="$2" + SWAGGER_OUT="${PROJECT_ROOT}/assets/swagger.json" + PRIMARY_SWAGGER=`mktemp` + COMBINED_SWAGGER=`mktemp` + + cat < "${PRIMARY_SWAGGER}" +{ + "swagger": "2.0", + "info": { + "title": "Consolidate Services", + "description": "Description of all APIs", + "version": "version not set" + }, + "paths": {} +} +EOF + + /bin/rm -f "${SWAGGER_OUT}" + + /usr/bin/find "cmd/${SWAGGER_ROOT}" -name '*.swagger.json' -exec /usr/local/bin/swagger mixin -c "${EXPECTED_COLLISIONS}" "${PRIMARY_SWAGGER}" '{}' \+ > "${COMBINED_SWAGGER}" + /usr/local/bin/jq -r 'del(.definitions[].properties[]? 
| select(."$ref"!=null and .description!=null).description) | del(.definitions[].properties[]? | select(."$ref"!=null and .title!=null).title)' "${COMBINED_SWAGGER}" > "${SWAGGER_OUT}" + + /bin/rm "${PRIMARY_SWAGGER}" "${COMBINED_SWAGGER}" +} + +# clean up generated swagger files (should come after collect_swagger) +clean_swagger() { + SWAGGER_ROOT="$1" + /usr/bin/find "${SWAGGER_ROOT}" -name '*.swagger.json' -delete +} + +collect_swagger server 21 +clean_swagger server +clean_swagger reposerver +clean_swagger controller \ No newline at end of file diff --git a/pkg/apiclient/apiclient.go b/pkg/apiclient/apiclient.go new file mode 100644 index 000000000000..2eafa609d39e --- /dev/null +++ b/pkg/apiclient/apiclient.go @@ -0,0 +1,10 @@ +package apiclient + +const ( + + // EnvArgoCDServer is the environment variable to look for an Argo CD server address + EnvArgoServer = "ARGO_SERVER" + + // MaxGRPCMessageSize contains max grpc message size + MaxGRPCMessageSize = 100 * 1024 * 1024 +) diff --git a/util/json/json.go b/util/json/json.go new file mode 100644 index 000000000000..824fdf224565 --- /dev/null +++ b/util/json/json.go @@ -0,0 +1,90 @@ +package json + +import ( + "encoding/json" + "io" + + gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime" +) + +// JSONMarshaler is a type which satisfies the grpc-gateway Marshaler interface +type JSONMarshaler struct{} + +// ContentType implements gwruntime.Marshaler. +func (j *JSONMarshaler) ContentType() string { + return "application/json" +} + +// Marshal implements gwruntime.Marshaler. +func (j *JSONMarshaler) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// NewDecoder implements gwruntime.Marshaler. +func (j *JSONMarshaler) NewDecoder(r io.Reader) gwruntime.Decoder { + return json.NewDecoder(r) +} + +// NewEncoder implements gwruntime.Marshaler. +func (j *JSONMarshaler) NewEncoder(w io.Writer) gwruntime.Encoder { + return json.NewEncoder(w) +} + +// Unmarshal implements gwruntime.Marshaler. +func (j *JSONMarshaler) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// https://github.com/ksonnet/ksonnet/blob/master/pkg/kubecfg/diff.go +func removeFields(config, live interface{}) interface{} { + switch c := config.(type) { + case map[string]interface{}: + return RemoveMapFields(c, live.(map[string]interface{})) + case []interface{}: + return removeListFields(c, live.([]interface{})) + default: + return live + } +} + +// RemoveMapFields remove all non-existent fields in the live that don't exist in the config +func RemoveMapFields(config, live map[string]interface{}) map[string]interface{} { + result := map[string]interface{}{} + for k, v1 := range config { + v2, ok := live[k] + if !ok { + continue + } + if v2 != nil { + v2 = removeFields(v1, v2) + } + result[k] = v2 + } + return result +} + +func removeListFields(config, live []interface{}) []interface{} { + // If live is longer than config, then the extra elements at the end of the + // list will be returned as-is so they appear in the diff. 
+	result := make([]interface{}, 0, len(live))
+	for i, v2 := range live {
+		if len(config) > i {
+			if v2 != nil {
+				v2 = removeFields(config[i], v2)
+			}
+			result = append(result, v2)
+		} else {
+			result = append(result, v2)
+		}
+	}
+	return result
+}
+
+// MustMarshal is a convenience function to marshal an object successfully or panic
+func MustMarshal(v interface{}) []byte {
+	bytes, err := json.Marshal(v)
+	if err != nil {
+		panic(err)
+	}
+	return bytes
+}

From bf33fb4864f5c968addb38ba73ed6f3391f7f214 Mon Sep 17 00:00:00 2001
From: Saravanan Balasubramanian
Date: Wed, 9 Oct 2019 21:59:33 -0700
Subject: [PATCH 003/421] Update code

---
 Gopkg.lock                                |   6 +-
 cmd/argo/commands/common.go               |  11 +-
 cmd/argo/commands/retry.go                |   2 +-
 cmd/client.go                             |  32 --
 cmd/client/client.go                      | 128 +++++
 cmd/main.go                               |  22 +-
 cmd/server/argoserver.go                  |  38 +-
 cmd/server/workflow/common.go             |  68 +++
 cmd/server/workflow/workflow.pb.go        | 667 +++++++++++++++++++---
 cmd/server/workflow/workflow.pb.gw.go     | 450 ++++++++++++++-
 cmd/server/workflow/workflow.proto        |  73 ++-
 cmd/server/workflow/workflow.swagger.json | 111 +++-
 cmd/server/workflow/workflow_service.go   | 177 +++++-
 util/util.go                              |  28 +-
 14 files changed, 1605 insertions(+), 208 deletions(-)
 delete mode 100644 cmd/client.go
 create mode 100644 cmd/client/client.go
 create mode 100644 cmd/server/workflow/common.go

diff --git a/Gopkg.lock b/Gopkg.lock
index cdcd5eeda3bc..62d12580b65b 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -1170,7 +1170,7 @@
 
 [[projects]]
   branch = "release-11.0"
-  digest = "1:1bdf01b0bd052a5285908f7eb1da2e8d19c980cb64037ec117619603b5cd31d8"
+  digest = "1:d77899845faaec7b101fdf7c30bcaede6f17dad11818c8ab336fbfb88192e662"
   name = "k8s.io/client-go"
   packages = [
     "discovery",
@@ -1288,7 +1288,7 @@
     "util/workqueue",
   ]
   pruneopts = ""
-  revision = "640d9f240853b749012f95f869868219deeb7433"
+  revision = "6d55c1b1f1ca8ad83d572bbc3ca55ba5526d9d71"
 
 [[projects]]
   branch = "release-1.14"
@@ -1439,6 +1439,7 @@
     "google.golang.org/grpc",
     "google.golang.org/grpc/codes",
     "google.golang.org/grpc/grpclog",
+    "google.golang.org/grpc/metadata",
     "google.golang.org/grpc/status",
     "gopkg.in/jcmturner/gokrb5.v5/client",
     "gopkg.in/jcmturner/gokrb5.v5/config",
@@ -1490,6 +1491,7 @@
     "k8s.io/code-generator/cmd/lister-gen",
     "k8s.io/kube-openapi/pkg/common",
     "k8s.io/utils/pointer",
+    "sigs.k8s.io/yaml",
    "upper.io/db.v3/lib/sqlbuilder",
    "upper.io/db.v3/mysql",
    "upper.io/db.v3/postgresql",
diff --git a/cmd/argo/commands/common.go b/cmd/argo/commands/common.go
index bc2fba565de8..541721e213a5 100644
--- a/cmd/argo/commands/common.go
+++ b/cmd/argo/commands/common.go
@@ -8,7 +8,7 @@ import (
 	"strings"
 
 	wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
-	versioned "github.com/argoproj/argo/pkg/client/clientset/versioned"
+	"github.com/argoproj/argo/pkg/client/clientset/versioned"
 	"github.com/argoproj/argo/pkg/client/clientset/versioned/typed/workflow/v1alpha1"
 	"github.com/spf13/cobra"
 	"k8s.io/client-go/kubernetes"
@@ -62,7 +62,7 @@ func initializeSession() {
 	}
 }
 
-func initKubeClient() *kubernetes.Clientset {
+func InitKubeClient() *kubernetes.Clientset {
 	if clientset != nil {
 		return clientset
 	}
@@ -85,7 +85,7 @@ func InitWorkflowClient(ns ...string) v1alpha1.WorkflowInterface {
 	if wfClient != nil && (len(ns) == 0 || ns[0] == namespace) {
 		return wfClient
 	}
-	initKubeClient()
+	InitKubeClient()
 	var err error
 	if len(ns) > 0 {
 		namespace = ns[0]
diff --git a/cmd/argo/commands/retry.go b/cmd/argo/commands/retry.go
index 95434b7de0a7..592755865a3d 100644
--- a/cmd/argo/commands/retry.go
+++ b/cmd/argo/commands/retry.go
@@ -22,7 +22,7 @@ func NewRetryCommand() *cobra.Command {
 				cmd.HelpFunc()(cmd, args)
 				os.Exit(1)
 			}
-			kubeClient := initKubeClient()
+			kubeClient := InitKubeClient()
 			wfClient := InitWorkflowClient()
 			wf, err := wfClient.Get(args[0], metav1.GetOptions{})
 			if err != nil {
diff --git a/cmd/client.go b/cmd/client.go
deleted file mode 100644
index b688bbfd73ea..000000000000
--- a/cmd/client.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package main
-
-import (
-	"context"
-	"fmt"
-	"github.com/argoproj/argo/cmd/server/workflow"
-	"google.golang.org/grpc"
-)
-
-func main(){
-
-	conn, err := grpc.Dial("localhost:8082", grpc.WithInsecure())
-	if err != nil {
-		fmt.Println(err)
-	}
-	defer conn.Close()
-	name := "scripts-bash-5ksp4"
-	query := workflow.WorkflowQuery{Name: name,}
-	client := workflow.NewWorkflowServiceClient(conn)
-	wflist, err :=client.List(context.TODO(),&query)
-	if err !=nil {
-		fmt.Println("errr",err)
-	}
-
-	//byte1, err := wflist.Workflows.Marshal()
-	for inx,_ := range wflist.Workflows {
-		fmt.Println("Response:", wflist.Workflows[inx].Name)
-		fmt.Println("/n /n")
-	}
-
-
-}
diff --git a/cmd/client/client.go b/cmd/client/client.go
new file mode 100644
index 000000000000..ac72eb8c4955
--- /dev/null
+++ b/cmd/client/client.go
@@ -0,0 +1,53 @@
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/argoproj/argo/cmd/server/workflow"
+	"github.com/argoproj/argo/util"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+)
+
+func main() {
+	conn, err := grpc.Dial("localhost:8082", grpc.WithInsecure())
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	defer conn.Close()
+	client := workflow.NewWorkflowServiceClient(conn)
+
+	// Build a ClientConfig from the local kube rest config so the server can
+	// act on behalf of this client.
+	config := util.InitKubeClient()
+	var clientConfig workflow.ClientConfig
+	clientConfig.Host = config.Host
+	clientConfig.APIPath = config.APIPath
+	clientConfig.TLSClientConfig = config.TLSClientConfig
+	clientConfig.Username = config.Username
+	clientConfig.Password = config.Password
+	clientConfig.AuthProvider = config.AuthProvider
+
+	by, err := json.Marshal(clientConfig)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
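+	// The marshalled rest config and the caller's bearer token travel to the
+	// Argo server as gRPC metadata (see the CLIENT_REST_CONFIG and AUTH_TOKEN
+	// keys in cmd/server/workflow/common.go); the server can use these when it
+	// is started with --enableClientAuth.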
+	//
+	md := metadata.Pairs(workflow.CLIENT_REST_CONFIG, string(by), workflow.AUTH_TOKEN, clientConfig.AuthProvider.Config["access-token"])
+	ctx := metadata.NewOutgoingContext(context.Background(), md)
+	//wq := workflow.WorkflowQuery{}
+	//created, err := client.Get(ctx, &wq)
+	//
+	//fmt.Println("error:", err)
+	//
+	fmt.Println(string(by))
+	wq := workflow.WorkflowQuery{Name: "retry-to-completion-d5j29", Namespace: "workflows"}
+	queried, err := client.Get(ctx, &wq)
+	if err != nil {
+		fmt.Println("error:", err)
+	}
+	fmt.Println(queried)
+	//var wuq workflow.WorkflowUpdateQuery
+	////wuq.Workflow = queried
+	////wur, err := client.Retry(context.TODO(), &wuq)
+	////
+	////if err != nil {
+	////	fmt.Println("error:", err)
+	////}
+	////fmt.Println(wur)
+	////
+	////name := "scripts-bash-5ksp4"
+	////query := workflow.WorkflowQuery{Name: name}
+	////
+	////
+	//created, err := client.Create(ctx, wf)
+	//if err != nil {
+	//	fmt.Println("error:", err)
+	//}
+	//fmt.Println(created)
+	//
+	////byte1, err := wflist.Workflows.Marshal()
+	////for inx := range wflist.Workflows {
+	////	fmt.Println("Response:", wflist.Workflows[inx].Name)
+	////
+	////}
+
+
+}
diff --git a/cmd/main.go b/cmd/main.go
index c835840a7215..ffa1cb150085 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -10,8 +10,13 @@ import (
 	"github.com/argoproj/pkg/stats"
 	"github.com/spf13/cobra"
 	"golang.org/x/net/context"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/azure"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
 	"k8s.io/client-go/tools/clientcmd"
 	"os"
+	"strconv"
 	"time"
 )

@@ -25,6 +30,7 @@ func NewRootCommand() *cobra.Command {
 	var (
 		clientConfig clientcmd.ClientConfig
 		logLevel     string // --loglevel
+		enableClientAuth string
 	)

 	var command = cobra.Command{
@@ -36,6 +42,7 @@ func NewRootCommand() *cobra.Command {
 			stats.StartStatsTicker(5 * time.Minute)

 			config, err := clientConfig.ClientConfig()
+
 			if err != nil {
 				return err
 			}
@@ -47,21 +54,19 @@ func NewRootCommand() *cobra.Command {
 				return err
 			}

-			//kubeclientset := kubernetes.NewForConfigOrDie(config)
 			wflientset := wfclientset.NewForConfigOrDie(config)
 			if err != nil {
 				return err
 			}
-
 			ctx, cancel := context.WithCancel(context.Background())
-
-			var opts = server.ArgoServerOpts{Namespace: namespace, KubeClientset: wflientset}
+			var clientAuth bool
+			clientAuth, err = strconv.ParseBool(enableClientAuth)
+			var opts = server.ArgoServerOpts{Namespace: namespace, KubeClientset: wflientset, EnableClientAuth: clientAuth}
 			argoSvr := server.NewArgoServer(ctx, opts )
 			defer cancel()
 			go argoSvr.Run(ctx,8082)
-
 			// Wait forever
 			select {}

@@ -70,13 +75,8 @@ func NewRootCommand() *cobra.Command {
 	clientConfig = kubecli.AddKubectlFlagsToCmd(&command)
 	command.AddCommand(cmdutil.NewVersionCmd(CLIName))
-	//command.Flags().StringVar(&configMap, "configmap", "workflow-controller-configmap", "Name of K8s configmap to retrieve workflow controller configuration")
-	//command.Flags().StringVar(&executorImage, "executor-image", "", "Executor image to use (overrides value in configmap)")
-	//command.Flags().StringVar(&executorImagePullPolicy, "executor-image-pull-policy", "", "Executor imagePullPolicy to use (overrides value in configmap)")
+	command.Flags().StringVar(&enableClientAuth, "enableClientAuth", "false", "Pass the client's bearer token and rest config to the server via gRPC metadata")
 	command.Flags().StringVar(&logLevel, "loglevel", "debug", "Set the logging level.
One of: debug|info|warn|error") - //command.Flags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level") - //command.Flags().IntVar(&workflowWorkers, "workflow-workers", 8, "Number of workflow workers") - //command.Flags().IntVar(&podWorkers, "pod-workers", 8, "Number of pod workers") return &command } diff --git a/cmd/server/argoserver.go b/cmd/server/argoserver.go index 587c7e43e713..005c77485ef2 100644 --- a/cmd/server/argoserver.go +++ b/cmd/server/argoserver.go @@ -15,26 +15,27 @@ import ( "fmt" "k8s.io/client-go/kubernetes" - //"net" "net/http" "time" ) type ArgoServer struct { - Namespace string - KubeClientset kubernetes.Clientset - wfClientSet *versioned.Clientset + Namespace string + KubeClientset kubernetes.Clientset + wfClientSet *versioned.Clientset + EnableClientAuth bool } type ArgoServerOpts struct { - Insecure bool - Namespace string - KubeClientset *versioned.Clientset + Insecure bool + Namespace string + KubeClientset *versioned.Clientset + EnableClientAuth bool } -func NewArgoServer(ctx context.Context, opts ArgoServerOpts) *ArgoServer{ +func NewArgoServer(ctx context.Context, opts ArgoServerOpts) *ArgoServer { - return &ArgoServer{Namespace: opts.Namespace, wfClientSet: opts.KubeClientset } + return &ArgoServer{Namespace: opts.Namespace, wfClientSet: opts.KubeClientset, EnableClientAuth: opts.EnableClientAuth} } var backoff = wait.Backoff{ @@ -43,7 +44,8 @@ var backoff = wait.Backoff{ Factor: 1.0, Jitter: 0.1, } -func (as *ArgoServer)Run(ctx context.Context, port int){ + +func (as *ArgoServer) Run(ctx context.Context, port int) { grpcs := as.newGRPCServer() //grpcWebS := grpcweb.WrapServer(grpcs) @@ -52,18 +54,6 @@ func (as *ArgoServer)Run(ctx context.Context, port int){ log.Fatalf("failed to listen: %v", err) } grpcs.Serve(lis) - - //// Start listener - //var realErr error - //_ = wait.ExponentialBackoff(backoff, func() (bool, error) { - // http.ListenAndServe(":8082", grpcs) - // if realErr != nil { - // log.Warnf("failed listen: %v", realErr) - // return false, nil - // } - // return true, nil - //}) - //errors.CheckError(realErr) } func (as *ArgoServer) newGRPCServer() *grpc.Server { @@ -77,7 +67,7 @@ func (as *ArgoServer) newGRPCServer() *grpc.Server { } grpcS := grpc.NewServer(sOpts...) - workflowService := workflow.NewServer(as.Namespace, *as.wfClientSet) + workflowService := workflow.NewServer(as.Namespace, *as.wfClientSet, as.EnableClientAuth) workflow.RegisterWorkflowServiceServer(grpcS, workflowService) return grpcS } @@ -184,5 +174,3 @@ func (a *ArgoServer) translateGrpcCookieHeader(ctx context.Context, w http.Respo return nil } - - diff --git a/cmd/server/workflow/common.go b/cmd/server/workflow/common.go new file mode 100644 index 000000000000..61c0f51a6dcc --- /dev/null +++ b/cmd/server/workflow/common.go @@ -0,0 +1,68 @@ +package workflow + +import ( + "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/util/flowcontrol" + "time" +) + +const CLIENT_REST_CONFIG = "rest.config" +const AUTH_TOKEN = "auth.token" + + +type ClientConfig struct { + + // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. + // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. + Host string + // APIPath is a sub-path that points to an API root. 
+ APIPath string + + // ContentConfig contains settings that affect how objects are transformed when + // sent to the server. + rest.ContentConfig + + // Server requires Basic authentication + Username string + Password string + + // Server requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + // TODO: demonstrate an OAuth2 compatible client. + BearerToken string + + // Path to a file containing a BearerToken. + // If set, the contents are periodically read. + // The last successfully read value takes precedence over BearerToken. + BearerTokenFile string + + // Impersonate is the configuration that RESTClient will use for impersonation. + Impersonate rest.ImpersonationConfig + + AuthProvider *clientcmdapi.AuthProviderConfig + + // TLSClientConfig contains settings to enable transport layer security + rest.TLSClientConfig + + // UserAgent is an optional field that specifies the caller of this request. + UserAgent string + + // QPS indicates the maximum QPS to the master from this client. + // If it's zero, the created RESTClient will use DefaultQPS: 5 + QPS float32 + + // Maximum burst for throttle. + // If it's zero, the created RESTClient will use DefaultBurst: 10. + Burst int + + // Rate limiter for limiting connections to the master from this client. If present overwrites QPS/Burst + RateLimiter flowcontrol.RateLimiter + + // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. + Timeout time.Duration + + +} \ No newline at end of file diff --git a/cmd/server/workflow/workflow.pb.go b/cmd/server/workflow/workflow.pb.go index 94be449d1b0c..9205a78e3605 100644 --- a/cmd/server/workflow/workflow.pb.go +++ b/cmd/server/workflow/workflow.pb.go @@ -16,6 +16,7 @@ import ( _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" io "io" + _ "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" ) @@ -125,10 +126,12 @@ func (m *WorkflowListResponse) GetWorkflows() []*v1alpha1.Workflow { } type WorkflowResponse struct { - Workflows *v1alpha1.Workflow `protobuf:"bytes,1,opt,name=workflows,proto3" json:"workflows,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Status string `protobuf:"bytes,2,opt,name=Status,proto3" json:"Status,omitempty"` + Message string `protobuf:"bytes,3,opt,name=Message,proto3" json:"Message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *WorkflowResponse) Reset() { *m = WorkflowResponse{} } @@ -164,15 +167,29 @@ func (m *WorkflowResponse) XXX_DiscardUnknown() { var xxx_messageInfo_WorkflowResponse proto.InternalMessageInfo -func (m *WorkflowResponse) GetWorkflows() *v1alpha1.Workflow { +func (m *WorkflowResponse) GetName() string { if m != nil { - return m.Workflows + return m.Name } - return nil + return "" +} + +func (m *WorkflowResponse) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + +func (m *WorkflowResponse) GetMessage() string { + if m != nil { + return m.Message + } + return "" } type WorkflowQuery struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` StartIdx int32 
`protobuf:"varint,3,opt,name=StartIdx,proto3" json:"StartIdx,omitempty"` PageSize int32 `protobuf:"varint,4,opt,name=PageSize,proto3" json:"PageSize,omitempty"` @@ -242,45 +259,113 @@ func (m *WorkflowQuery) GetPageSize() int32 { return 0 } +type WorkflowUpdateQuery struct { + Workflow *v1alpha1.Workflow `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + Memoized bool `protobuf:"varint,2,opt,name=memoized,proto3" json:"memoized,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowUpdateQuery) Reset() { *m = WorkflowUpdateQuery{} } +func (m *WorkflowUpdateQuery) String() string { return proto.CompactTextString(m) } +func (*WorkflowUpdateQuery) ProtoMessage() {} +func (*WorkflowUpdateQuery) Descriptor() ([]byte, []int) { + return fileDescriptor_192bc67c39cca05a, []int{4} +} +func (m *WorkflowUpdateQuery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowUpdateQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkflowUpdateQuery.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkflowUpdateQuery) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowUpdateQuery.Merge(m, src) +} +func (m *WorkflowUpdateQuery) XXX_Size() int { + return m.Size() +} +func (m *WorkflowUpdateQuery) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowUpdateQuery.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowUpdateQuery proto.InternalMessageInfo + +func (m *WorkflowUpdateQuery) GetWorkflow() *v1alpha1.Workflow { + if m != nil { + return m.Workflow + } + return nil +} + +func (m *WorkflowUpdateQuery) GetMemoized() bool { + if m != nil { + return m.Memoized + } + return false +} + func init() { proto.RegisterType((*WorkflowCreateResponse)(nil), "workflow.WorkflowCreateResponse") proto.RegisterType((*WorkflowListResponse)(nil), "workflow.WorkflowListResponse") proto.RegisterType((*WorkflowResponse)(nil), "workflow.WorkflowResponse") proto.RegisterType((*WorkflowQuery)(nil), "workflow.WorkflowQuery") + proto.RegisterType((*WorkflowUpdateQuery)(nil), "workflow.WorkflowUpdateQuery") } func init() { proto.RegisterFile("cmd/server/workflow/workflow.proto", fileDescriptor_192bc67c39cca05a) } var fileDescriptor_192bc67c39cca05a = []byte{ - // 438 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xcd, 0x8e, 0xd3, 0x30, - 0x10, 0x96, 0xdb, 0xb2, 0xda, 0x1a, 0x21, 0x16, 0xb3, 0x40, 0x08, 0xab, 0x28, 0xca, 0xa9, 0x42, - 0xc8, 0x56, 0x17, 0x4e, 0x2b, 0x71, 0x61, 0x0f, 0x08, 0x09, 0x21, 0x48, 0x91, 0x90, 0x96, 0x93, - 0x37, 0x1d, 0xbc, 0xa1, 0x6d, 0x1c, 0xd9, 0xde, 0x2c, 0xe5, 0xc8, 0x81, 0x17, 0xe0, 0xa5, 0x38, - 0x70, 0x40, 0xe2, 0x05, 0x50, 0xc5, 0x83, 0xa0, 0x38, 0xb5, 0x83, 0x48, 0xcb, 0x85, 0x3d, 0xe5, - 0x9b, 0xf9, 0x32, 0xdf, 0x8c, 0xe7, 0x07, 0x27, 0xd9, 0x62, 0xca, 0x34, 0xa8, 0x0a, 0x14, 0xbb, - 0x90, 0x6a, 0xf6, 0x6e, 0x2e, 0x2f, 0x3c, 0xa0, 0xa5, 0x92, 0x46, 0x92, 0x5d, 0x67, 0x87, 0xfb, - 0x42, 0x0a, 0x69, 0x9d, 0xac, 0x46, 0x0d, 0x1f, 0x1e, 0x08, 0x29, 0xc5, 0x1c, 0x18, 0x2f, 0x73, - 0xc6, 0x8b, 0x42, 0x1a, 0x6e, 0x72, 0x59, 0xe8, 0x35, 0x7b, 0x2c, 0x72, 0x73, 0x76, 0x7e, 0x4a, - 0x33, 0xb9, 0x60, 0x5c, 0xd9, 0xf0, 0xf7, 0x16, 0xb0, 0x72, 0x26, 0xea, 0x18, 0xdd, 0x26, 0xae, - 0xc6, 0x7c, 0x5e, 0x9e, 0xf1, 0x31, 0x13, 0x50, 0x80, 0xe2, 
0x06, 0xa6, 0x8d, 0x48, 0xf2, 0x08, - 0xdf, 0x7e, 0xb3, 0xfe, 0xe9, 0x58, 0x01, 0x37, 0x90, 0x82, 0x2e, 0x65, 0xa1, 0x81, 0x84, 0x78, - 0x57, 0xad, 0x71, 0x80, 0x62, 0x34, 0x1a, 0xa6, 0xde, 0x4e, 0x34, 0xde, 0x77, 0x51, 0xcf, 0x73, - 0x6d, 0x7c, 0xcc, 0x5b, 0x3c, 0x74, 0x29, 0x75, 0x80, 0xe2, 0xfe, 0xe8, 0xea, 0xe1, 0x63, 0xda, - 0x96, 0x49, 0x5d, 0x99, 0x16, 0xd0, 0x72, 0x26, 0x68, 0x5d, 0x26, 0xf5, 0x6d, 0x71, 0x65, 0x52, - 0xa7, 0x9e, 0xb6, 0x7a, 0x89, 0xc4, 0x7b, 0xde, 0xbd, 0x25, 0x21, 0xba, 0xd4, 0x84, 0x4b, 0x7c, - 0xcd, 0xb9, 0x5f, 0x9d, 0x83, 0x5a, 0x12, 0x82, 0x07, 0x2f, 0xf8, 0xc2, 0xb5, 0xc3, 0x62, 0x72, - 0x80, 0x87, 0xf5, 0x57, 0x97, 0x3c, 0x83, 0xa0, 0x67, 0x89, 0xd6, 0x51, 0x37, 0x71, 0x62, 0xb8, - 0x32, 0xcf, 0xa6, 0x1f, 0x82, 0x7e, 0x8c, 0x46, 0x57, 0x52, 0x6f, 0xd7, 0xdc, 0x4b, 0x2e, 0x60, - 0x92, 0x7f, 0x84, 0x60, 0xd0, 0x70, 0xce, 0x3e, 0xfc, 0xd6, 0xc3, 0xd7, 0x5d, 0xee, 0x09, 0xa8, - 0x2a, 0xcf, 0x80, 0x7c, 0x46, 0x78, 0xa7, 0x99, 0x11, 0xf9, 0xbf, 0x37, 0x86, 0x71, 0x4b, 0x6e, - 0x1e, 0x7e, 0x72, 0xef, 0xd3, 0x8f, 0x5f, 0x5f, 0x7a, 0xb7, 0x92, 0x3d, 0xbb, 0x7b, 0xd5, 0xd8, - 0x6f, 0xd2, 0x11, 0xba, 0x4f, 0x4e, 0xf0, 0xa0, 0x9e, 0x3a, 0xb9, 0xd3, 0x95, 0xb1, 0x7d, 0x0a, - 0xa3, 0x2e, 0xf1, 0xe7, 0x9a, 0x24, 0x77, 0xad, 0xfa, 0x4d, 0x72, 0xe3, 0x6f, 0x75, 0x4d, 0x5e, - 0xe3, 0xfe, 0x53, 0xf8, 0x87, 0x74, 0xd8, 0x25, 0xbc, 0x6c, 0x60, 0x65, 0x09, 0xe9, 0x14, 0xfd, - 0xe4, 0xe8, 0xeb, 0x2a, 0x42, 0xdf, 0x57, 0x11, 0xfa, 0xb9, 0x8a, 0xd0, 0xc9, 0x83, 0xad, 0x87, - 0xb3, 0xe1, 0x66, 0x4f, 0x77, 0xec, 0xa1, 0x3c, 0xfc, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x5b, 0x91, - 0x47, 0xab, 0xd1, 0x03, 0x00, 0x00, + // 628 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x96, 0x4d, 0x6b, 0x13, 0x41, + 0x18, 0xc7, 0x99, 0xbe, 0xa4, 0xc9, 0x88, 0xa8, 0xd3, 0x52, 0xd7, 0xa5, 0x86, 0xb2, 0xa7, 0x52, + 0x64, 0x97, 0xd6, 0x22, 0x52, 0xf4, 0x62, 0x05, 0x11, 0x54, 0x74, 0xa3, 0xa8, 0xf5, 0x20, 0xd3, + 0xec, 0xe3, 0x66, 0x4d, 0x76, 0x67, 0x99, 0x99, 0xa4, 0xa6, 0x22, 0x82, 0x67, 0x6f, 0x7e, 0x04, + 0xdf, 0xbe, 0x85, 0x67, 0x8f, 0x82, 0x5f, 0x40, 0x82, 0x1f, 0x44, 0x66, 0xb2, 0x33, 0x1b, 0xd9, + 0xe6, 0x94, 0x4b, 0x6e, 0xcf, 0x4b, 0x9e, 0xff, 0xf3, 0xdb, 0xc9, 0xf3, 0xec, 0x2c, 0xf6, 0xda, + 0x69, 0x14, 0x08, 0xe0, 0x03, 0xe0, 0xc1, 0x31, 0xe3, 0xdd, 0x57, 0x3d, 0x76, 0x6c, 0x0d, 0x3f, + 0xe7, 0x4c, 0x32, 0x52, 0x37, 0xbe, 0xbb, 0x16, 0xb3, 0x98, 0xe9, 0x60, 0xa0, 0xac, 0x71, 0xde, + 0xdd, 0x88, 0x19, 0x8b, 0x7b, 0x10, 0xd0, 0x3c, 0x09, 0x68, 0x96, 0x31, 0x49, 0x65, 0xc2, 0x32, + 0x51, 0x64, 0xf7, 0xba, 0xd7, 0x85, 0x9f, 0x30, 0x95, 0x4d, 0x69, 0xbb, 0x93, 0x64, 0xc0, 0x87, + 0x41, 0xde, 0x8d, 0x55, 0x40, 0x04, 0x29, 0x48, 0x1a, 0x0c, 0x76, 0x82, 0x18, 0x32, 0xe0, 0x54, + 0x42, 0x54, 0x54, 0x1d, 0xc4, 0x89, 0xec, 0xf4, 0x8f, 0xfc, 0x36, 0x4b, 0x03, 0xca, 0x75, 0xd3, + 0xd7, 0xda, 0x28, 0x4b, 0x2d, 0xee, 0x60, 0x87, 0xf6, 0xf2, 0x0e, 0xad, 0x88, 0x78, 0x7b, 0x78, + 0xfd, 0x69, 0xf1, 0xa3, 0x03, 0x0e, 0x54, 0x42, 0x08, 0x22, 0x67, 0x99, 0x00, 0xe2, 0xe2, 0x3a, + 0x2f, 0x6c, 0x07, 0x6d, 0xa2, 0xad, 0x46, 0x68, 0x7d, 0x4f, 0xe0, 0x35, 0x53, 0x75, 0x2f, 0x11, + 0xd2, 0xd6, 0xbc, 0xc0, 0x0d, 0xd3, 0x52, 0x38, 0x68, 0x73, 0x71, 0xeb, 0xcc, 0xee, 0x4d, 0xbf, + 0xc4, 0xf4, 0x0d, 0xa6, 0x36, 0xfc, 0xbc, 0x1b, 0xfb, 0x0a, 0xd3, 0xb7, 0x87, 0x69, 0x30, 0x7d, + 0xa3, 0x1e, 0x96, 0x7a, 0xde, 0x33, 0x7c, 0xde, 0x86, 0x4d, 0x43, 0x82, 0x97, 0x1e, 0xd0, 0xd4, + 0x00, 0x6a, 0x9b, 0xac, 0xe3, 0x5a, 0x4b, 0x52, 0xd9, 0x17, 0xce, 0x82, 0x8e, 0x16, 0x1e, 0x71, + 
0xf0, 0xca, 0x7d, 0x10, 0x82, 0xc6, 0xe0, 0x2c, 0xea, 0x84, 0x71, 0xbd, 0x21, 0x3e, 0x6b, 0x94, + 0x1f, 0xf5, 0x81, 0x0f, 0x95, 0x6c, 0x36, 0x21, 0xab, 0x6c, 0xb2, 0x81, 0x1b, 0x4a, 0x5e, 0xe4, + 0xb4, 0x0d, 0x85, 0x72, 0x19, 0x50, 0xa7, 0xd5, 0x92, 0x94, 0xcb, 0xbb, 0xd1, 0x1b, 0xad, 0xbe, + 0x1c, 0x5a, 0x5f, 0xe5, 0x1e, 0xd2, 0x18, 0x5a, 0xc9, 0x09, 0x38, 0x4b, 0xe3, 0x9c, 0xf1, 0xbd, + 0x8f, 0x08, 0xaf, 0x9a, 0xde, 0x4f, 0xf2, 0x88, 0x4a, 0x18, 0x13, 0x3c, 0xc7, 0x76, 0xa4, 0x34, + 0xc5, 0xcc, 0x07, 0x69, 0xe5, 0x14, 0x4e, 0x0a, 0x29, 0x4b, 0x4e, 0x20, 0xd2, 0xcf, 0x51, 0x0f, + 0xad, 0xbf, 0xfb, 0x03, 0xe3, 0x73, 0xa6, 0xa4, 0x05, 0x7c, 0x90, 0xb4, 0x81, 0x7c, 0x41, 0xb8, + 0x36, 0x9e, 0x0d, 0x32, 0x1b, 0x83, 0x3b, 0x5b, 0xb9, 0xb7, 0xf1, 0xe1, 0xf7, 0xdf, 0x4f, 0x0b, + 0xeb, 0xde, 0x05, 0xbd, 0x46, 0x83, 0x1d, 0x3b, 0xde, 0x62, 0x1f, 0x6d, 0x93, 0xf7, 0x78, 0xf1, + 0x0e, 0x48, 0x72, 0xb1, 0x54, 0xf8, 0xef, 0x3f, 0x9d, 0xb5, 0xf9, 0xa6, 0x6e, 0xee, 0x12, 0xa7, + 0xd2, 0x3c, 0x78, 0xab, 0xe6, 0xe3, 0x1d, 0x39, 0xc4, 0x4b, 0x6a, 0x19, 0xa6, 0x13, 0x34, 0xab, + 0x89, 0xc9, 0xed, 0xf1, 0x2e, 0xe9, 0x16, 0xab, 0xa4, 0xfa, 0x7c, 0xe4, 0x25, 0xae, 0xdd, 0x86, + 0x1e, 0x48, 0x98, 0xae, 0xee, 0x56, 0x13, 0x56, 0xb9, 0x80, 0xdf, 0x9e, 0x0e, 0xff, 0x19, 0xe1, + 0xe5, 0x10, 0x24, 0x1f, 0x92, 0xcb, 0x55, 0x9d, 0x89, 0xc1, 0x9c, 0xf5, 0x18, 0x6f, 0x68, 0x92, + 0x6b, 0xee, 0xf6, 0x29, 0x24, 0xb6, 0x4c, 0xbd, 0xf1, 0x22, 0x2a, 0xa9, 0xaf, 0xd9, 0xf6, 0xcb, + 0xd1, 0xfd, 0x86, 0x70, 0x3d, 0x04, 0xd1, 0x3f, 0x4a, 0x13, 0x39, 0xd7, 0xa0, 0x6a, 0x67, 0x14, + 0x68, 0x0a, 0x73, 0x8d, 0xf9, 0x15, 0xe1, 0x95, 0x56, 0x5f, 0xe4, 0x90, 0x45, 0x73, 0xcd, 0xf9, + 0x1d, 0xe1, 0xc6, 0x63, 0xe0, 0x69, 0x92, 0xa9, 0xb7, 0xd0, 0x1c, 0x93, 0xde, 0xda, 0xff, 0x39, + 0x6a, 0xa2, 0x5f, 0xa3, 0x26, 0xfa, 0x33, 0x6a, 0xa2, 0xc3, 0x2b, 0x53, 0xaf, 0xe8, 0x53, 0xbe, + 0x29, 0x8e, 0x6a, 0xfa, 0x4a, 0xbe, 0xfa, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe8, 0xf5, 0x0b, 0x8f, + 0x71, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -295,9 +380,15 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
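 // Editor's annotation: the regenerated client below mirrors the rpcs added in
 // workflow.proto. Every mutating rpc (Retry, Resubmit, Resume, Suspend,
 // Terminate) takes a WorkflowUpdateQuery and returns the full
 // v1alpha1.Workflow, rather than the thin WorkflowResponse status message.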
type WorkflowServiceClient interface { - Create(ctx context.Context, in *v1alpha1.Workflow, opts ...grpc.CallOption) (*WorkflowCreateResponse, error) + Create(ctx context.Context, in *v1alpha1.Workflow, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + Get(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) List(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowListResponse, error) - Get(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowResponse, error) + Delete(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowResponse, error) + Retry(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + Resubmit(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + Resume(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + Suspend(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + Terminate(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) } type workflowServiceClient struct { @@ -308,8 +399,8 @@ func NewWorkflowServiceClient(cc *grpc.ClientConn) WorkflowServiceClient { return &workflowServiceClient{cc} } -func (c *workflowServiceClient) Create(ctx context.Context, in *v1alpha1.Workflow, opts ...grpc.CallOption) (*WorkflowCreateResponse, error) { - out := new(WorkflowCreateResponse) +func (c *workflowServiceClient) Create(ctx context.Context, in *v1alpha1.Workflow, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { + out := new(v1alpha1.Workflow) err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Create", in, out, opts...) if err != nil { return nil, err @@ -317,6 +408,15 @@ func (c *workflowServiceClient) Create(ctx context.Context, in *v1alpha1.Workflo return out, nil } +func (c *workflowServiceClient) Get(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { + out := new(v1alpha1.Workflow) + err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *workflowServiceClient) List(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowListResponse, error) { out := new(WorkflowListResponse) err := c.cc.Invoke(ctx, "/workflow.WorkflowService/List", in, out, opts...) @@ -326,9 +426,54 @@ func (c *workflowServiceClient) List(ctx context.Context, in *WorkflowQuery, opt return out, nil } -func (c *workflowServiceClient) Get(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowResponse, error) { +func (c *workflowServiceClient) Delete(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowResponse, error) { out := new(WorkflowResponse) - err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Get", in, out, opts...) + err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) Retry(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { + out := new(v1alpha1.Workflow) + err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Retry", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) Resubmit(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { + out := new(v1alpha1.Workflow) + err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Resubmit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) Resume(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { + out := new(v1alpha1.Workflow) + err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Resume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) Suspend(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { + out := new(v1alpha1.Workflow) + err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Suspend", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowServiceClient) Terminate(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { + out := new(v1alpha1.Workflow) + err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Terminate", in, out, opts...) if err != nil { return nil, err } @@ -337,9 +482,15 @@ func (c *workflowServiceClient) Get(ctx context.Context, in *WorkflowQuery, opts // WorkflowServiceServer is the server API for WorkflowService service. type WorkflowServiceServer interface { - Create(context.Context, *v1alpha1.Workflow) (*WorkflowCreateResponse, error) + Create(context.Context, *v1alpha1.Workflow) (*v1alpha1.Workflow, error) + Get(context.Context, *WorkflowQuery) (*v1alpha1.Workflow, error) List(context.Context, *WorkflowQuery) (*WorkflowListResponse, error) - Get(context.Context, *WorkflowQuery) (*WorkflowResponse, error) + Delete(context.Context, *WorkflowQuery) (*WorkflowResponse, error) + Retry(context.Context, *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) + Resubmit(context.Context, *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) + Resume(context.Context, *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) + Suspend(context.Context, *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) + Terminate(context.Context, *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) } func RegisterWorkflowServiceServer(s *grpc.Server, srv WorkflowServiceServer) { @@ -364,6 +515,24 @@ func _WorkflowService_Create_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +func _WorkflowService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflow.WorkflowService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).Get(ctx, req.(*WorkflowQuery)) + } + return interceptor(ctx, in, info, handler) +} + func _WorkflowService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(WorkflowQuery) if err := dec(in); err != nil { @@ -382,20 +551,110 @@ func _WorkflowService_List_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, 
handler) } -func _WorkflowService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _WorkflowService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(WorkflowQuery) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(WorkflowServiceServer).Get(ctx, in) + return srv.(WorkflowServiceServer).Delete(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/workflow.WorkflowService/Get", + FullMethod: "/workflow.WorkflowService/Delete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WorkflowServiceServer).Get(ctx, req.(*WorkflowQuery)) + return srv.(WorkflowServiceServer).Delete(ctx, req.(*WorkflowQuery)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_Retry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowUpdateQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).Retry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflow.WorkflowService/Retry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).Retry(ctx, req.(*WorkflowUpdateQuery)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_Resubmit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowUpdateQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).Resubmit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflow.WorkflowService/Resubmit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).Resubmit(ctx, req.(*WorkflowUpdateQuery)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_Resume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowUpdateQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).Resume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflow.WorkflowService/Resume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).Resume(ctx, req.(*WorkflowUpdateQuery)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowService_Suspend_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowUpdateQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).Suspend(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflow.WorkflowService/Suspend", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).Suspend(ctx, req.(*WorkflowUpdateQuery)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _WorkflowService_Terminate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowUpdateQuery) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).Terminate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflow.WorkflowService/Terminate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).Terminate(ctx, req.(*WorkflowUpdateQuery)) } return interceptor(ctx, in, info, handler) } @@ -408,13 +667,37 @@ var _WorkflowService_serviceDesc = grpc.ServiceDesc{ MethodName: "Create", Handler: _WorkflowService_Create_Handler, }, + { + MethodName: "Get", + Handler: _WorkflowService_Get_Handler, + }, { MethodName: "List", Handler: _WorkflowService_List_Handler, }, { - MethodName: "Get", - Handler: _WorkflowService_Get_Handler, + MethodName: "Delete", + Handler: _WorkflowService_Delete_Handler, + }, + { + MethodName: "Retry", + Handler: _WorkflowService_Retry_Handler, + }, + { + MethodName: "Resubmit", + Handler: _WorkflowService_Resubmit_Handler, + }, + { + MethodName: "Resume", + Handler: _WorkflowService_Resume_Handler, + }, + { + MethodName: "Suspend", + Handler: _WorkflowService_Suspend_Handler, + }, + { + MethodName: "Terminate", + Handler: _WorkflowService_Terminate_Handler, }, }, Streams: []grpc.StreamDesc{}, @@ -496,15 +779,23 @@ func (m *WorkflowResponse) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Workflows != nil { + if len(m.Name) > 0 { dAtA[i] = 0xa i++ - i = encodeVarintWorkflow(dAtA, i, uint64(m.Workflows.Size())) - n1, err := m.Workflows.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Status) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + } + if len(m.Message) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -555,6 +846,47 @@ func (m *WorkflowQuery) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *WorkflowUpdateQuery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowUpdateQuery) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Workflow != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintWorkflow(dAtA, i, uint64(m.Workflow.Size())) + n1, err := m.Workflow.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Memoized { + dAtA[i] = 0x10 + i++ + if m.Memoized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func encodeVarintWorkflow(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -604,8 +936,16 @@ func (m *WorkflowResponse) Size() (n int) { } var l int _ = l - if m.Workflows != nil { - l = m.Workflows.Size() + l = len(m.Name) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + l = len(m.Status) + if l > 0 { + n += 1 + l + 
sovWorkflow(uint64(l)) + } + l = len(m.Message) + if l > 0 { n += 1 + l + sovWorkflow(uint64(l)) } if m.XXX_unrecognized != nil { @@ -640,6 +980,25 @@ func (m *WorkflowQuery) Size() (n int) { return n } +func (m *WorkflowUpdateQuery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Workflow != nil { + l = m.Workflow.Size() + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.Memoized { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovWorkflow(x uint64) (n int) { for { n++ @@ -858,9 +1217,9 @@ func (m *WorkflowResponse) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowWorkflow @@ -870,27 +1229,87 @@ func (m *WorkflowResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthWorkflow } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthWorkflow } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Workflows == nil { - m.Workflows = &v1alpha1.Workflow{} + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - if err := m.Workflows.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1073,6 +1492,116 @@ func (m *WorkflowQuery) Unmarshal(dAtA []byte) error { } return nil } +func (m *WorkflowUpdateQuery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowUpdateQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowUpdateQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Workflow == nil { + m.Workflow = &v1alpha1.Workflow{} + } + if err := m.Workflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Memoized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Memoized = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipWorkflow(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/cmd/server/workflow/workflow.pb.gw.go b/cmd/server/workflow/workflow.pb.gw.go index a72907155882..6aba99f04977 100644 --- a/cmd/server/workflow/workflow.pb.gw.go +++ b/cmd/server/workflow/workflow.pb.gw.go @@ -46,6 +46,44 @@ func request_WorkflowService_Create_0(ctx context.Context, marshaler runtime.Mar } +var ( + filter_WorkflowService_Get_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_WorkflowService_Get_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowQuery + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Get_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + var ( filter_WorkflowService_List_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} ) @@ -67,21 +105,269 @@ func request_WorkflowService_List_0(ctx context.Context, marshaler runtime.Marsh } var ( - filter_WorkflowService_Get_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} + filter_WorkflowService_Delete_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} ) -func request_WorkflowService_Get_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { +func request_WorkflowService_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq WorkflowQuery var metadata runtime.ServerMetadata + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + } + + protoReq.Name, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + } + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Get_0); err != nil { + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Delete_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - 
msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_Retry_0 = &utilities.DoubleArray{Encoding: map[string]int{"workflow": 0, "metadata": 1, "name": 2}, Base: []int{1, 2, 1, 1, 0, 0}, Check: []int{0, 1, 2, 3, 4, 2}} +) + +func request_WorkflowService_Retry_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowUpdateQuery + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["workflow.metadata.name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow.metadata.name") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow.metadata.name", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow.metadata.name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Retry_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Retry(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_Resubmit_0 = &utilities.DoubleArray{Encoding: map[string]int{"workflow": 0, "metadata": 1, "name": 2}, Base: []int{1, 2, 1, 1, 0, 0}, Check: []int{0, 1, 2, 3, 4, 2}} +) + +func request_WorkflowService_Resubmit_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowUpdateQuery + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["workflow.metadata.name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow.metadata.name") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow.metadata.name", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow.metadata.name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Resubmit_0); err != nil { + 
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Resubmit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_Resume_0 = &utilities.DoubleArray{Encoding: map[string]int{"workflow": 0, "metadata": 1, "name": 2}, Base: []int{1, 2, 1, 1, 0, 0}, Check: []int{0, 1, 2, 3, 4, 2}} +) + +func request_WorkflowService_Resume_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowUpdateQuery + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["workflow.metadata.name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow.metadata.name") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow.metadata.name", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow.metadata.name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Resume_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Resume(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_Suspend_0 = &utilities.DoubleArray{Encoding: map[string]int{"workflow": 0, "metadata": 1, "name": 2}, Base: []int{1, 2, 1, 1, 0, 0}, Check: []int{0, 1, 2, 3, 4, 2}} +) + +func request_WorkflowService_Suspend_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowUpdateQuery + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["workflow.metadata.name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow.metadata.name") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow.metadata.name", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow.metadata.name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Suspend_0); err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Suspend(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +var ( + filter_WorkflowService_Terminate_0 = &utilities.DoubleArray{Encoding: map[string]int{"workflow": 0, "metadata": 1, "name": 2}, Base: []int{1, 2, 1, 1, 0, 0}, Check: []int{0, 1, 2, 3, 4, 2}} +) + +func request_WorkflowService_Terminate_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowUpdateQuery + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["workflow.metadata.name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow.metadata.name") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "workflow.metadata.name", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow.metadata.name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Terminate_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Terminate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } @@ -144,6 +430,26 @@ func RegisterWorkflowServiceHandlerClient(ctx context.Context, mux *runtime.Serv }) + mux.Handle("GET", pattern_WorkflowService_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_Get_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_WorkflowService_List_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -164,7 +470,7 @@ func RegisterWorkflowServiceHandlerClient(ctx context.Context, mux *runtime.Serv }) - mux.Handle("GET", pattern_WorkflowService_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("DELETE", pattern_WorkflowService_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -173,14 +479,114 @@ func RegisterWorkflowServiceHandlerClient(ctx context.Context, mux *runtime.Serv runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_WorkflowService_Get_0(rctx, inboundMarshaler, client, req, pathParams) + resp, md, err := request_WorkflowService_Delete_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_WorkflowService_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_WorkflowService_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_WorkflowService_Retry_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_Retry_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Retry_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_WorkflowService_Resubmit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_Resubmit_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Resubmit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_WorkflowService_Resume_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_Resume_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Resume_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_WorkflowService_Suspend_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_Suspend_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Suspend_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_WorkflowService_Terminate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_Terminate_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Terminate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -188,17 +594,41 @@ func RegisterWorkflowServiceHandlerClient(ctx context.Context, mux *runtime.Serv } var ( - pattern_WorkflowService_Create_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "workflow"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Create_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "workflows"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "name"}, "", runtime.AssumeColonVerbOpt(true))) pattern_WorkflowService_List_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "workflows"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "workflow"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_Retry_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "workflow.metadata.name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_Resubmit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "workflow.metadata.name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_Resume_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "workflow.metadata.name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_Suspend_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "workflow.metadata.name"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_Terminate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "workflow.metadata.name"}, "", runtime.AssumeColonVerbOpt(true))) ) var ( forward_WorkflowService_Create_0 = runtime.ForwardResponseMessage + forward_WorkflowService_Get_0 = runtime.ForwardResponseMessage + forward_WorkflowService_List_0 = runtime.ForwardResponseMessage - forward_WorkflowService_Get_0 = runtime.ForwardResponseMessage + forward_WorkflowService_Delete_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_Retry_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_Resubmit_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_Resume_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_Suspend_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_Terminate_0 = runtime.ForwardResponseMessage ) diff --git a/cmd/server/workflow/workflow.proto b/cmd/server/workflow/workflow.proto index 25051e6c647f..0f2a13492b9f 100644 --- a/cmd/server/workflow/workflow.proto +++ b/cmd/server/workflow/workflow.proto @@ -4,6 +4,7 @@ option go_package = "github.com/argoproj/argo/cmd/server/workflow"; import "gogoproto/gogo.proto"; import "google/api/annotations.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import 
"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1/generated.proto"; // Workflow Service @@ -20,28 +21,88 @@ message WorkflowListResponse{ } +//message LogEntry { +// string content = 1 [(gogoproto.nullable) = false]; +// k8s.io.apimachinery.pkg.apis.meta.v1.Time timeStamp = 2 [(gogoproto.nullable) = false]; +//} + + message WorkflowResponse{ - github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow workflows =1; + string Name = 1; + string Status = 2; + string Message = 3; } + message WorkflowQuery{ - string Name = 1; + string name = 1; string Namespace = 2; int32 StartIdx = 3; int32 PageSize = 4; } +message WorkflowUpdateQuery{ + github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow workflow = 1; + bool memoized = 2; +} + service WorkflowService { - rpc Create(github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) returns (WorkflowCreateResponse){ + rpc Create(github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ option (google.api.http) = { - post: "/api/v1/workflow" + post: "/api/v1/workflows" body: "*" }; } + + rpc Get(WorkflowQuery) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + option (google.api.http).get = "/api/v1/workflows/{name}"; + } + rpc List(WorkflowQuery) returns (WorkflowListResponse){ option (google.api.http).get = "/api/v1/workflows"; } - rpc Get(WorkflowQuery) returns (WorkflowResponse){ - option (google.api.http).get = "/api/v1/workflow"; + rpc Delete(WorkflowQuery) returns (WorkflowResponse){ + option (google.api.http).delete = "/api/v1/workflows/{name}"; + } + + rpc Retry(WorkflowUpdateQuery) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + option (google.api.http) = { + put: "/api/v1/workflows/{workflow.metadata.name}" + body: "workflow" + }; + } + + rpc Resubmit(WorkflowUpdateQuery) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + option (google.api.http) = { + put: "/api/v1/workflows/{workflow.metadata.name}" + body: "workflow" + }; + } + + rpc Resume(WorkflowUpdateQuery) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + option (google.api.http) = { + put: "/api/v1/workflows/{workflow.metadata.name}" + body: "workflow" + }; + } + + rpc Suspend(WorkflowUpdateQuery) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + option (google.api.http) = { + put: "/api/v1/workflows/{workflow.metadata.name}" + body: "workflow" + }; + } + + rpc Terminate(WorkflowUpdateQuery) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + option (google.api.http) = { + put: "/api/v1/workflows/{workflow.metadata.name}" + body: "workflow" + }; } + +// // PodLogs returns stream of log entries for the specified pod. 
Pod +// rpc PodLogs(WorkflowQuery) returns (stream LogEntry) { +// option (google.api.http).get = "/api/v1/workflow/{name}/pods/{podName}/logs"; +// } + } \ No newline at end of file diff --git a/cmd/server/workflow/workflow.swagger.json b/cmd/server/workflow/workflow.swagger.json index 67483ffac0e5..516e38762402 100644 --- a/cmd/server/workflow/workflow.swagger.json +++ b/cmd/server/workflow/workflow.swagger.json @@ -16,20 +16,20 @@ "application/json" ], "paths": { - "/api/v1/workflow": { + "/api/v1/workflows": { "get": { - "operationId": "Get", + "operationId": "List", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/workflowWorkflowResponse" + "$ref": "#/definitions/workflowWorkflowListResponse" } } }, "parameters": [ { - "name": "Name", + "name": "name", "in": "query", "required": false, "type": "string" @@ -65,7 +65,7 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/workflowWorkflowCreateResponse" + "$ref": "#/definitions/v1alpha1Workflow" } } }, @@ -84,24 +84,66 @@ ] } }, - "/api/v1/workflows": { + "/api/v1/workflows/{name}": { "get": { - "operationId": "List", + "operationId": "Get", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/workflowWorkflowListResponse" + "$ref": "#/definitions/v1alpha1Workflow" } } }, "parameters": [ { - "name": "Name", + "name": "name", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "Namespace", "in": "query", "required": false, "type": "string" }, + { + "name": "StartIdx", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "PageSize", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "WorkflowService" + ] + }, + "delete": { + "operationId": "Delete", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/workflowWorkflowResponse" + } + } + }, + "parameters": [ + { + "name": "name", + "in": "path", + "required": true, + "type": "string" + }, { "name": "Namespace", "in": "query", @@ -127,6 +169,39 @@ "WorkflowService" ] } + }, + "/api/v1/workflows/{workflow.metadata.name}": { + "put": { + "operationId": "Terminate", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Workflow" + } + } + }, + "parameters": [ + { + "name": "workflow.metadata.name", + "description": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. 
Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n+optional", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1Workflow" + } + } + ], + "tags": [ + "WorkflowService" + ] + } } }, "definitions": { @@ -3665,14 +3740,6 @@ }, "title": "WorkflowStep is a reference to a template to execute in a series of step" }, - "workflowWorkflowCreateResponse": { - "type": "object", - "properties": { - "response": { - "type": "string" - } - } - }, "workflowWorkflowListResponse": { "type": "object", "properties": { @@ -3687,8 +3754,14 @@ "workflowWorkflowResponse": { "type": "object", "properties": { - "workflows": { - "$ref": "#/definitions/v1alpha1Workflow" + "Name": { + "type": "string" + }, + "Status": { + "type": "string" + }, + "Message": { + "type": "string" } } }, diff --git a/cmd/server/workflow/workflow_service.go b/cmd/server/workflow/workflow_service.go index 2c8acc0349b7..fcaa6dbd1d51 100644 --- a/cmd/server/workflow/workflow_service.go +++ b/cmd/server/workflow/workflow_service.go @@ -1,67 +1,188 @@ package workflow import ( + "encoding/json" "fmt" "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo/pkg/client/clientset/versioned" - //log "github.com/sirupsen/logrus" + wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" + "github.com/argoproj/argo/workflow/util" "golang.org/x/net/context" + "google.golang.org/grpc/metadata" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" ) -type Server struct{ - Namespace string - Clientset versioned.Clientset +type Server struct { + Namespace string + Clientset versioned.Clientset + EnableClientAuth bool } - -func NewServer(Namespace string, clientset versioned.Clientset) WorkflowServiceServer{ - return &Server{Namespace:Namespace, Clientset:clientset} +func NewServer(Namespace string, clientset versioned.Clientset, enableClientAuth bool) WorkflowServiceServer { + return &Server{Namespace: Namespace, Clientset: clientset, EnableClientAuth: enableClientAuth} } -func (s *Server) Create(context.Context, *v1alpha1.Workflow) (*WorkflowCreateResponse, error){ +func (s *Server) GetClientSet(md metadata.MD) (*versioned.Clientset, error) { - return nil, nil + if s.EnableClientAuth { + return &s.Clientset, nil + } + + var restConfigStr, bearerToken string + + restConfigStr = md.Get(CLIENT_REST_CONFIG)[0] + + bearerToken = md.Get(AUTH_TOKEN)[0] + + restConfig := rest.Config{} + err := json.Unmarshal([]byte(restConfigStr), &restConfig) + if err != nil { + return nil, err + } + restConfig.BearerToken = string(bearerToken) + //restConfig :=rest.Config{ + // // TODO: switch to using cluster DNS. 
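	// NOTE: md.Get(CLIENT_REST_CONFIG)[0] above panics when the caller did not
	// supply that metadata key (the same applies to the AUTH_TOKEN lookup). A
	// defensive sketch of the same lookup, under the same assumptions:
	//
	//   vals := md.Get(CLIENT_REST_CONFIG)
	//   if len(vals) == 0 {
	//       return nil, fmt.Errorf("required metadata %q is missing", CLIENT_REST_CONFIG)
	//   }
	//   restConfigStr = vals[0]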
+ // Host: host, + // TLSClientConfig: tlsClientConfig, + // BearerToken: string(bearerToken), + // + // } + + fmt.Println(restConfigStr) + // create the clientset + clientset, err := wfclientset.NewForConfig(&restConfig) + + if err != nil { + return nil, err + } + + return clientset, nil } +func (s *Server) Create(ctx context.Context, in *v1alpha1.Workflow) (*v1alpha1.Workflow, error) { + + md, _ := metadata.FromIncomingContext(ctx) + clientset, err := s.GetClientSet(md) -func (s *Server)Get(ctx context.Context, query *WorkflowQuery) (*WorkflowResponse, error){ - wf, err := s.Clientset.ArgoprojV1alpha1().Workflows(s.Namespace).Get(query.Name, v1.GetOptions{}) - fmt.Println("Welcome") + if clientset == nil { + return nil, nil + } + namespace := s.Namespace + if in.Namespace != "" { + namespace = in.Namespace + } + wf, err := s.Clientset.ArgoprojV1alpha1().Workflows(namespace).Create(in) if err != nil { fmt.Println(err) + return nil, err + } + + return wf, nil +} + +func (s *Server) Get(ctx context.Context, in *WorkflowQuery) (*v1alpha1.Workflow, error) { + + namespace := s.Namespace + if in.Namespace != "" { + namespace = in.Namespace } - var wfRsp WorkflowResponse - //bytes, err := json.Marshal(wf) - wfRsp.Workflows = wf - - //err = wfRsp.Workflows.Unmarshal(byte) - fmt.Println("Error : ",err) - fmt.Println(wfRsp.GetWorkflows()) - //bytes, err := wfRsp.Marshal() + md, _ := metadata.FromIncomingContext(ctx) + + clientset, err := s.GetClientSet(md) + + if clientset == nil { + return nil, nil + + } + wf, err := clientset.ArgoprojV1alpha1().Workflows(namespace).Get(in.Name, v1.GetOptions{}) + if err != nil { fmt.Println(err) + return nil, err } - //wfRsp.Unmarshal(bytes) - fmt.Println(wfRsp) - return &wfRsp, err + return wf, err } -func (s *Server) List(ctx context.Context, query *WorkflowQuery) (*WorkflowListResponse, error) { - wfList, err := s.Clientset.ArgoprojV1alpha1().Workflows(s.Namespace).List(v1.ListOptions{}) +func (s *Server) List(ctx context.Context, in *WorkflowQuery) (*WorkflowListResponse, error) { + + namespace := s.Namespace + + if in.Namespace != "" { + namespace = in.Namespace + } + + wfList, err := s.Clientset.ArgoprojV1alpha1().Workflows(namespace).List(v1.ListOptions{}) if err != nil { fmt.Println(err) } //fmt.Println(wfList) var wfListItem []*v1alpha1.Workflow - for idx,_ := range wfList.Items{ + for idx, _ := range wfList.Items { wfListItem = append(wfListItem, &wfList.Items[idx]) } var wfListRsp = WorkflowListResponse{} wfListRsp.Workflows = wfListItem fmt.Println(wfListRsp) - return &wfListRsp,nil + return &wfListRsp, nil + +} + +func (s *Server) Delete(ctx context.Context, in *WorkflowQuery) (*WorkflowResponse, error) { + + return nil, nil +} + +func (s *Server) Retry(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) { + //namespace := s.Namespace + //if in.Workflow.Namespace != "" { + // namespace = in.Workflow.Namespace + //} + //kubeClient := commonutil.InitKubeClient() + // + ////wf, err := util.RetryWorkflow(kubeClient., s.Clientset.ArgoprojV1alpha1().Workflows(namespace),in.Workflow) + // + //if err != nil { + // fmt.Println(err) + // return nil, err + //} + // + //return wf, err + return nil, nil +} + +func (s *Server) Resubmit(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) { + namespace := s.Namespace + if in.Workflow.Namespace != "" { + namespace = in.Workflow.Namespace + } + + var wfClientset *versioned.Clientset + + wf, err := s.Clientset.ArgoprojV1alpha1().Workflows(namespace).Get(in.Workflow.Name, 
v1.GetOptions{}) + //errors.CheckError(err) + newWF, err := util.FormulateResubmitWorkflow(wf, in.Memoized) + //errors.CheckError(err) + created, err := util.SubmitWorkflow(s.Clientset.ArgoprojV1alpha1().Workflows(namespace), wfClientset, namespace, newWF, nil) -} \ No newline at end of file + if err != nil { + fmt.Println(err) + return nil, err + } + + return created, err +} + +func (s *Server) Resume(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) { + return nil, nil +} + +func (s *Server) Suspend(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) { + return nil, nil +} + +func (s *Server) Terminate(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) { + return nil, nil +} diff --git a/util/util.go b/util/util.go index b1545c26e58d..43c6ac77e273 100644 --- a/util/util.go +++ b/util/util.go @@ -1,16 +1,18 @@ package util import ( - "io/ioutil" - log "github.com/sirupsen/logrus" + "io/ioutil" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "os" "github.com/argoproj/argo/errors" "github.com/argoproj/argo/util/retry" + "k8s.io/client-go/rest" ) type Closer interface { @@ -57,3 +59,25 @@ func WriteTeriminateMessage(message string) { panic(err) } } + +func GetClientConfig() clientcmd.ClientConfig { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig + overrides := clientcmd.ConfigOverrides{} + //kflags := clientcmd.RecommendedConfigOverrideFlags("") + return clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, &overrides, os.Stdin) +} + +func InitKubeClient() *rest.Config { + + var err error + var clientConfig clientcmd.ClientConfig + + clientConfig = GetClientConfig() + config, err := clientConfig.ClientConfig() + if err != nil { + panic(err.Error()) + } + + return config +} \ No newline at end of file From 142b01eecf7e20446d8aa2a9488bc484f3d67030 Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian Date: Tue, 22 Oct 2019 15:02:49 -0700 Subject: [PATCH 004/421] updated step.go --- workflow/controller/steps.go | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/workflow/controller/steps.go b/workflow/controller/steps.go index 8a82f6a74219..f042f1dc9cff 100644 --- a/workflow/controller/steps.go +++ b/workflow/controller/steps.go @@ -75,7 +75,7 @@ func (woc *wfOperationCtx) executeSteps(nodeName string, tmplCtx *templateresolu } } - sgNode = woc.executeStepGroup(stepGroup.Steps, sgNodeName, &stepsCtx) + sgNode := woc.executeStepGroup(stepGroup.Steps, sgNodeName, &stepsCtx) if !sgNode.Completed() { woc.log.Infof("Workflow step group node %v not yet completed", sgNode) @@ -100,17 +100,22 @@ func (woc *wfOperationCtx) executeSteps(nodeName string, tmplCtx *templateresolu // We add the aggregate outputs of our children to the scope as a JSON list var childNodes []wfv1.NodeStatus for _, node := range woc.wf.Status.Nodes { - if node.BoundaryID == stepsCtx.boundaryID && strings.HasPrefix(node.Name, childNodeName+"(") { + if node.BoundaryID == stepsCtx.boundaryID && strings.HasPrefix(node.Name, childNodeName+"(") && node.Type != wfv1.NodeTypeSkipped { childNodes = append(childNodes, node) } } - tmpl, err := stepsCtx.tmplCtx.GetTemplate(&step) - if err != nil { - return err - } - err = woc.processAggregateNodeOutputs(tmpl, stepsCtx.scope, prefix, childNodes) - if err != 
nil { - return err + if len(childNodes) > 0 { + // Expanded child nodes should be created from the same template. + tmpl := woc.wf.GetStoredOrLocalTemplate(&childNodes[0]) + if tmpl == nil { + return errors.InternalErrorf("Template of step node '%s' not found (inferred from %s)", childNodeName, childNodes[0].Name) + } + err := woc.processAggregateNodeOutputs(tmpl, stepsCtx.scope, prefix, childNodes) + if err != nil { + return err + } + } else { + woc.log.Infof("Step '%s' has no expanded child nodes", childNode) } } else { woc.processNodeOutputs(stepsCtx.scope, prefix, childNode) From 83b8f73fc6290319efd718ecdebb97190cecbffa Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian Date: Wed, 23 Oct 2019 09:53:05 -0700 Subject: [PATCH 005/421] Fixed logging and dependency --- Gopkg.lock | 2 ++ workflow/util/util.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Gopkg.lock b/Gopkg.lock index d9f8c4f5aaa8..2be442512860 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -60,6 +60,7 @@ digest = "1:d04fdd419650d1bc6c84b0988a94a7438cc3143e95a61a63f5c2bf56542156e3" name = "github.com/argoproj/pkg" packages = [ + "auth", "cli", "errors", "exec", @@ -1414,6 +1415,7 @@ analyzer-version = 1 input-imports = [ "github.com/Knetic/govaluate", + "github.com/argoproj/pkg/auth", "github.com/argoproj/pkg/cli", "github.com/argoproj/pkg/errors", "github.com/argoproj/pkg/exec", diff --git a/workflow/util/util.go b/workflow/util/util.go index b279f9b10c77..4dfe48348d76 100644 --- a/workflow/util/util.go +++ b/workflow/util/util.go @@ -516,7 +516,7 @@ func RetryWorkflow(kubeClient kubernetes.Interface, wfClient v1alpha1.WorkflowIn // do not add this status to the node. pretend as if this node never existed. default: // Do not allow retry of workflows with pods in Running/Pending phase - return nil, errors.InternalErrorf("Workflow cannot be retried with node %s in %s phase", node, node.Phase) + return nil, errors.InternalErrorf("Workflow cannot be retried with node %s in %s phase", node.Name, node.Phase) } if node.Type == wfv1.NodeTypePod { log.Infof("Deleting pod: %s", node.ID) From dcf1669616b8e13d64be2ab43d30cee339063bef Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian Date: Thu, 24 Oct 2019 13:00:59 -0700 Subject: [PATCH 006/421] Add second commit --- api/openapi-spec/swagger.json | 2 - assets/swagger.json | 0 cmd/server/{ => apiserver}/argoserver.go | 2 +- cmd/{ => server}/main.go | 8 +- cmd/server/workflow/workflow.pb.go | 1831 +++++++++++++---- cmd/server/workflow/workflow.pb.gw.go | 932 +++++++-- cmd/server/workflow/workflow.proto | 103 +- cmd/server/workflow/workflow.swagger.json | 411 +++- cmd/server/workflow/workflow_service.go | 229 ++- hack/generate-proto.sh | 4 +- .../workflow/v1alpha1/openapi_generated.go | 434 +++- workflow/controller/workflowpod.go | 2 +- 12 files changed, 3213 insertions(+), 745 deletions(-) create mode 100644 assets/swagger.json rename cmd/server/{ => apiserver}/argoserver.go (99%) rename cmd/{ => server}/main.go (90%) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index f96ccc9e6b5c..8c6771189c43 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -1166,7 +1166,6 @@ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ScriptTemplate" }, "securityContext": { - "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. 
See type description for default values of each field.", "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext" }, "serviceAccountName": { @@ -1485,7 +1484,6 @@ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ExecutorConfig" }, "hostAliases": { - "description": "HostAliases is an optional list of hosts and IPs that will be injected into the pod spec", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.core.v1.HostAlias" diff --git a/assets/swagger.json b/assets/swagger.json new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/cmd/server/argoserver.go b/cmd/server/apiserver/argoserver.go similarity index 99% rename from cmd/server/argoserver.go rename to cmd/server/apiserver/argoserver.go index 005c77485ef2..f6e784577e8b 100644 --- a/cmd/server/argoserver.go +++ b/cmd/server/apiserver/argoserver.go @@ -1,4 +1,4 @@ -package server +package apiserver import ( "github.com/argoproj/argo/cmd/server/workflow" diff --git a/cmd/main.go b/cmd/server/main.go similarity index 90% rename from cmd/main.go rename to cmd/server/main.go index ffa1cb150085..fb99340801d1 100644 --- a/cmd/main.go +++ b/cmd/server/main.go @@ -1,8 +1,8 @@ -package main +package server import ( "fmt" - "github.com/argoproj/argo/cmd/server" + "github.com/argoproj/argo/cmd/server/apiserver" wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" cmdutil "github.com/argoproj/argo/util/cmd" "github.com/argoproj/pkg/cli" @@ -62,8 +62,8 @@ func NewRootCommand() *cobra.Command { ctx, cancel := context.WithCancel(context.Background()) var clientAuth bool clientAuth, err =strconv.ParseBool( enableClientAuth) - var opts = server.ArgoServerOpts{Namespace: namespace, KubeClientset: wflientset, EnableClientAuth: clientAuth} - argoSvr := server.NewArgoServer(ctx, opts ) + var opts = apiserver.ArgoServerOpts{Namespace: namespace, KubeClientset: wflientset, EnableClientAuth: clientAuth} + argoSvr := apiserver.NewArgoServer(ctx, opts ) defer cancel() go argoSvr.Run(ctx,8082) diff --git a/cmd/server/workflow/workflow.pb.go b/cmd/server/workflow/workflow.pb.go index 9205a78e3605..cbb8371e1b9e 100644 --- a/cmd/server/workflow/workflow.pb.go +++ b/cmd/server/workflow/workflow.pb.go @@ -15,9 +15,12 @@ import ( proto "github.com/gogo/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" io "io" - _ "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" + math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. @@ -29,343 +32,528 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
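// NOTE: this regenerated file swaps the earlier ad-hoc query messages for
// request types that embed the standard apimachinery option structs
// (CreateOptions, GetOptions, ListOptions, DeleteOptions), so Kubernetes-style
// options flow through the API unchanged. A minimal client-side sketch (the
// variable names are illustrative; the types are the generated ones below):
//
//   req := &workflow.WorkflowCreateRequest{
//       Workflows:     wf,                     // *v1alpha1.Workflow to submit
//       CreateOptions: &metav1.CreateOptions{},
//   }
//   created, err := client.Create(ctx, req)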
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -type WorkflowCreateResponse struct { - Response string `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type WorkflowCreateRequest struct { + Workflows *v1alpha1.Workflow `protobuf:"bytes,1,opt,name=Workflows,proto3" json:"Workflows,omitempty"` + CreateOptions *v1.CreateOptions `protobuf:"bytes,2,opt,name=CreateOptions,proto3" json:"CreateOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *WorkflowCreateResponse) Reset() { *m = WorkflowCreateResponse{} } -func (m *WorkflowCreateResponse) String() string { return proto.CompactTextString(m) } -func (*WorkflowCreateResponse) ProtoMessage() {} -func (*WorkflowCreateResponse) Descriptor() ([]byte, []int) { +func (m *WorkflowCreateRequest) Reset() { *m = WorkflowCreateRequest{} } +func (m *WorkflowCreateRequest) String() string { return proto.CompactTextString(m) } +func (*WorkflowCreateRequest) ProtoMessage() {} +func (*WorkflowCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptor_192bc67c39cca05a, []int{0} } -func (m *WorkflowCreateResponse) XXX_Unmarshal(b []byte) error { +func (m *WorkflowCreateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *WorkflowCreateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *WorkflowCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_WorkflowCreateResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_WorkflowCreateRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *WorkflowCreateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowCreateResponse.Merge(m, src) +func (m *WorkflowCreateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowCreateRequest.Merge(m, src) } -func (m *WorkflowCreateResponse) XXX_Size() int { +func (m *WorkflowCreateRequest) XXX_Size() int { return m.Size() } -func (m *WorkflowCreateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowCreateResponse.DiscardUnknown(m) +func (m *WorkflowCreateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowCreateRequest.DiscardUnknown(m) } -var xxx_messageInfo_WorkflowCreateResponse proto.InternalMessageInfo +var xxx_messageInfo_WorkflowCreateRequest proto.InternalMessageInfo -func (m *WorkflowCreateResponse) GetResponse() string { +func (m *WorkflowCreateRequest) GetWorkflows() *v1alpha1.Workflow { if m != nil { - return m.Response + return m.Workflows } - return "" + return nil +} + +func (m *WorkflowCreateRequest) GetCreateOptions() *v1.CreateOptions { + if m != nil { + return m.CreateOptions + } + return nil } -type WorkflowListResponse struct { - Workflows []*v1alpha1.Workflow `protobuf:"bytes,1,rep,name=workflows,proto3" json:"workflows,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type WorkflowGetRequest struct { + WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` + Namespace string 
`protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` + GetOptions *v1.GetOptions `protobuf:"bytes,3,opt,name=GetOptions,proto3" json:"GetOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *WorkflowListResponse) Reset() { *m = WorkflowListResponse{} } -func (m *WorkflowListResponse) String() string { return proto.CompactTextString(m) } -func (*WorkflowListResponse) ProtoMessage() {} -func (*WorkflowListResponse) Descriptor() ([]byte, []int) { +func (m *WorkflowGetRequest) Reset() { *m = WorkflowGetRequest{} } +func (m *WorkflowGetRequest) String() string { return proto.CompactTextString(m) } +func (*WorkflowGetRequest) ProtoMessage() {} +func (*WorkflowGetRequest) Descriptor() ([]byte, []int) { return fileDescriptor_192bc67c39cca05a, []int{1} } -func (m *WorkflowListResponse) XXX_Unmarshal(b []byte) error { +func (m *WorkflowGetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *WorkflowListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *WorkflowGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_WorkflowListResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_WorkflowGetRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *WorkflowListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowListResponse.Merge(m, src) +func (m *WorkflowGetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowGetRequest.Merge(m, src) } -func (m *WorkflowListResponse) XXX_Size() int { +func (m *WorkflowGetRequest) XXX_Size() int { return m.Size() } -func (m *WorkflowListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowListResponse.DiscardUnknown(m) +func (m *WorkflowGetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowGetRequest.DiscardUnknown(m) } -var xxx_messageInfo_WorkflowListResponse proto.InternalMessageInfo +var xxx_messageInfo_WorkflowGetRequest proto.InternalMessageInfo -func (m *WorkflowListResponse) GetWorkflows() []*v1alpha1.Workflow { +func (m *WorkflowGetRequest) GetWorkflowName() string { if m != nil { - return m.Workflows + return m.WorkflowName + } + return "" +} + +func (m *WorkflowGetRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *WorkflowGetRequest) GetGetOptions() *v1.GetOptions { + if m != nil { + return m.GetOptions } return nil } -type WorkflowResponse struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - Status string `protobuf:"bytes,2,opt,name=Status,proto3" json:"Status,omitempty"` - Message string `protobuf:"bytes,3,opt,name=Message,proto3" json:"Message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type WorkflowListRequest struct { + Namespace string `protobuf:"bytes,1,opt,name=Namespace,proto3" json:"Namespace,omitempty"` + ListOptions *v1.ListOptions `protobuf:"bytes,2,opt,name=ListOptions,proto3" json:"ListOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *WorkflowResponse) Reset() { *m = WorkflowResponse{} } -func (m *WorkflowResponse) String() string { return proto.CompactTextString(m) } -func 
(*WorkflowResponse) ProtoMessage() {} -func (*WorkflowResponse) Descriptor() ([]byte, []int) { +func (m *WorkflowListRequest) Reset() { *m = WorkflowListRequest{} } +func (m *WorkflowListRequest) String() string { return proto.CompactTextString(m) } +func (*WorkflowListRequest) ProtoMessage() {} +func (*WorkflowListRequest) Descriptor() ([]byte, []int) { return fileDescriptor_192bc67c39cca05a, []int{2} } -func (m *WorkflowResponse) XXX_Unmarshal(b []byte) error { +func (m *WorkflowListRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *WorkflowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *WorkflowListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_WorkflowResponse.Marshal(b, m, deterministic) + return xxx_messageInfo_WorkflowListRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *WorkflowResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowResponse.Merge(m, src) +func (m *WorkflowListRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowListRequest.Merge(m, src) } -func (m *WorkflowResponse) XXX_Size() int { +func (m *WorkflowListRequest) XXX_Size() int { return m.Size() } -func (m *WorkflowResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowResponse.DiscardUnknown(m) +func (m *WorkflowListRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowListRequest.DiscardUnknown(m) } -var xxx_messageInfo_WorkflowResponse proto.InternalMessageInfo +var xxx_messageInfo_WorkflowListRequest proto.InternalMessageInfo -func (m *WorkflowResponse) GetName() string { +func (m *WorkflowListRequest) GetNamespace() string { if m != nil { - return m.Name + return m.Namespace } return "" } -func (m *WorkflowResponse) GetStatus() string { +func (m *WorkflowListRequest) GetListOptions() *v1.ListOptions { + if m != nil { + return m.ListOptions + } + return nil +} + +type WorkflowUpdateRequest struct { + WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowUpdateRequest) Reset() { *m = WorkflowUpdateRequest{} } +func (m *WorkflowUpdateRequest) String() string { return proto.CompactTextString(m) } +func (*WorkflowUpdateRequest) ProtoMessage() {} +func (*WorkflowUpdateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_192bc67c39cca05a, []int{3} +} +func (m *WorkflowUpdateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkflowUpdateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkflowUpdateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowUpdateRequest.Merge(m, src) +} +func (m *WorkflowUpdateRequest) XXX_Size() int { + return m.Size() +} +func (m *WorkflowUpdateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowUpdateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowUpdateRequest proto.InternalMessageInfo + +func (m 
*WorkflowUpdateRequest) GetWorkflowName() string { if m != nil { - return m.Status + return m.WorkflowName } return "" } -func (m *WorkflowResponse) GetMessage() string { +func (m *WorkflowUpdateRequest) GetNamespace() string { if m != nil { - return m.Message + return m.Namespace } return "" } -type WorkflowQuery struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +type WorkflowLogRequest struct { + WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` - StartIdx int32 `protobuf:"varint,3,opt,name=StartIdx,proto3" json:"StartIdx,omitempty"` - PageSize int32 `protobuf:"varint,4,opt,name=PageSize,proto3" json:"PageSize,omitempty"` + PodName string `protobuf:"bytes,3,opt,name=PodName,proto3" json:"PodName,omitempty"` + Container string `protobuf:"bytes,4,opt,name=Container,proto3" json:"Container,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *WorkflowQuery) Reset() { *m = WorkflowQuery{} } -func (m *WorkflowQuery) String() string { return proto.CompactTextString(m) } -func (*WorkflowQuery) ProtoMessage() {} -func (*WorkflowQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{3} +func (m *WorkflowLogRequest) Reset() { *m = WorkflowLogRequest{} } +func (m *WorkflowLogRequest) String() string { return proto.CompactTextString(m) } +func (*WorkflowLogRequest) ProtoMessage() {} +func (*WorkflowLogRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_192bc67c39cca05a, []int{4} } -func (m *WorkflowQuery) XXX_Unmarshal(b []byte) error { +func (m *WorkflowLogRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *WorkflowQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *WorkflowLogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_WorkflowQuery.Marshal(b, m, deterministic) + return xxx_messageInfo_WorkflowLogRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *WorkflowQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowQuery.Merge(m, src) +func (m *WorkflowLogRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowLogRequest.Merge(m, src) } -func (m *WorkflowQuery) XXX_Size() int { +func (m *WorkflowLogRequest) XXX_Size() int { return m.Size() } -func (m *WorkflowQuery) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowQuery.DiscardUnknown(m) +func (m *WorkflowLogRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowLogRequest.DiscardUnknown(m) } -var xxx_messageInfo_WorkflowQuery proto.InternalMessageInfo +var xxx_messageInfo_WorkflowLogRequest proto.InternalMessageInfo -func (m *WorkflowQuery) GetName() string { +func (m *WorkflowLogRequest) GetWorkflowName() string { if m != nil { - return m.Name + return m.WorkflowName } return "" } -func (m *WorkflowQuery) GetNamespace() string { +func (m *WorkflowLogRequest) GetNamespace() string { if m != nil { return m.Namespace } return "" } -func (m *WorkflowQuery) GetStartIdx() int32 { +func (m *WorkflowLogRequest) GetPodName() string { if m != nil { - return m.StartIdx + return m.PodName } - return 0 + return "" } -func (m *WorkflowQuery) GetPageSize() int32 { +func (m *WorkflowLogRequest) 
GetContainer() string { if m != nil { - return m.PageSize + return m.Container } - return 0 + return "" } -type WorkflowUpdateQuery struct { - Workflow *v1alpha1.Workflow `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` - Memoized bool `protobuf:"varint,2,opt,name=memoized,proto3" json:"memoized,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type WorkflowDeleteRequest struct { + WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` + DeleteOptions *v1.DeleteOptions `protobuf:"bytes,3,opt,name=DeleteOptions,proto3" json:"DeleteOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *WorkflowUpdateQuery) Reset() { *m = WorkflowUpdateQuery{} } -func (m *WorkflowUpdateQuery) String() string { return proto.CompactTextString(m) } -func (*WorkflowUpdateQuery) ProtoMessage() {} -func (*WorkflowUpdateQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{4} +func (m *WorkflowDeleteRequest) Reset() { *m = WorkflowDeleteRequest{} } +func (m *WorkflowDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*WorkflowDeleteRequest) ProtoMessage() {} +func (*WorkflowDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_192bc67c39cca05a, []int{5} } -func (m *WorkflowUpdateQuery) XXX_Unmarshal(b []byte) error { +func (m *WorkflowDeleteRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *WorkflowUpdateQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *WorkflowDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_WorkflowUpdateQuery.Marshal(b, m, deterministic) + return xxx_messageInfo_WorkflowDeleteRequest.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } -func (m *WorkflowUpdateQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkflowUpdateQuery.Merge(m, src) +func (m *WorkflowDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowDeleteRequest.Merge(m, src) } -func (m *WorkflowUpdateQuery) XXX_Size() int { +func (m *WorkflowDeleteRequest) XXX_Size() int { return m.Size() } -func (m *WorkflowUpdateQuery) XXX_DiscardUnknown() { - xxx_messageInfo_WorkflowUpdateQuery.DiscardUnknown(m) +func (m *WorkflowDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowDeleteRequest proto.InternalMessageInfo + +func (m *WorkflowDeleteRequest) GetWorkflowName() string { + if m != nil { + return m.WorkflowName + } + return "" } -var xxx_messageInfo_WorkflowUpdateQuery proto.InternalMessageInfo +func (m *WorkflowDeleteRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} -func (m *WorkflowUpdateQuery) GetWorkflow() *v1alpha1.Workflow { +func (m *WorkflowDeleteRequest) GetDeleteOptions() *v1.DeleteOptions { if m != nil { - return m.Workflow + return m.DeleteOptions } return nil } -func (m *WorkflowUpdateQuery) GetMemoized() bool { +type WorkflowDeleteResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *WorkflowDeleteResponse) Reset() { *m = WorkflowDeleteResponse{} } +func (m *WorkflowDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*WorkflowDeleteResponse) ProtoMessage() {} +func (*WorkflowDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_192bc67c39cca05a, []int{6} +} +func (m *WorkflowDeleteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkflowDeleteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkflowDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowDeleteResponse.Merge(m, src) +} +func (m *WorkflowDeleteResponse) XXX_Size() int { + return m.Size() +} +func (m *WorkflowDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowDeleteResponse proto.InternalMessageInfo + +type LogEntry struct { + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + TimeStamp *v1.Time `protobuf:"bytes,2,opt,name=timeStamp,proto3" json:"timeStamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogEntry) Reset() { *m = LogEntry{} } +func (m *LogEntry) String() string { return proto.CompactTextString(m) } +func (*LogEntry) ProtoMessage() {} +func (*LogEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_192bc67c39cca05a, []int{7} +} +func (m *LogEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LogEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LogEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogEntry.Merge(m, src) +} +func (m *LogEntry) XXX_Size() int { + return m.Size() +} +func (m *LogEntry) XXX_DiscardUnknown() { + xxx_messageInfo_LogEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_LogEntry proto.InternalMessageInfo + +func (m *LogEntry) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +func (m *LogEntry) GetTimeStamp() *v1.Time { if m != nil { - return m.Memoized + return m.TimeStamp } - return false + return nil } func init() { - proto.RegisterType((*WorkflowCreateResponse)(nil), "workflow.WorkflowCreateResponse") - proto.RegisterType((*WorkflowListResponse)(nil), "workflow.WorkflowListResponse") - proto.RegisterType((*WorkflowResponse)(nil), "workflow.WorkflowResponse") - proto.RegisterType((*WorkflowQuery)(nil), "workflow.WorkflowQuery") - proto.RegisterType((*WorkflowUpdateQuery)(nil), "workflow.WorkflowUpdateQuery") + proto.RegisterType((*WorkflowCreateRequest)(nil), "workflow.WorkflowCreateRequest") + proto.RegisterType((*WorkflowGetRequest)(nil), "workflow.WorkflowGetRequest") + proto.RegisterType((*WorkflowListRequest)(nil), "workflow.WorkflowListRequest") + proto.RegisterType((*WorkflowUpdateRequest)(nil), "workflow.WorkflowUpdateRequest") + proto.RegisterType((*WorkflowLogRequest)(nil), "workflow.WorkflowLogRequest") + proto.RegisterType((*WorkflowDeleteRequest)(nil), 
"workflow.WorkflowDeleteRequest") + proto.RegisterType((*WorkflowDeleteResponse)(nil), "workflow.WorkflowDeleteResponse") + proto.RegisterType((*LogEntry)(nil), "workflow.LogEntry") } func init() { proto.RegisterFile("cmd/server/workflow/workflow.proto", fileDescriptor_192bc67c39cca05a) } var fileDescriptor_192bc67c39cca05a = []byte{ - // 628 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x96, 0x4d, 0x6b, 0x13, 0x41, - 0x18, 0xc7, 0x99, 0xbe, 0xa4, 0xc9, 0x88, 0xa8, 0xd3, 0x52, 0xd7, 0xa5, 0x86, 0xb2, 0xa7, 0x52, - 0x64, 0x97, 0xd6, 0x22, 0x52, 0xf4, 0x62, 0x05, 0x11, 0x54, 0x74, 0xa3, 0xa8, 0xf5, 0x20, 0xd3, - 0xec, 0xe3, 0x66, 0x4d, 0x76, 0x67, 0x99, 0x99, 0xa4, 0xa6, 0x22, 0x82, 0x67, 0x6f, 0x7e, 0x04, - 0xdf, 0xbe, 0x85, 0x67, 0x8f, 0x82, 0x5f, 0x40, 0x82, 0x1f, 0x44, 0x66, 0xb2, 0x33, 0x1b, 0xd9, - 0xe6, 0x94, 0x4b, 0x6e, 0xcf, 0x4b, 0x9e, 0xff, 0xf3, 0xdb, 0xc9, 0xf3, 0xec, 0x2c, 0xf6, 0xda, - 0x69, 0x14, 0x08, 0xe0, 0x03, 0xe0, 0xc1, 0x31, 0xe3, 0xdd, 0x57, 0x3d, 0x76, 0x6c, 0x0d, 0x3f, - 0xe7, 0x4c, 0x32, 0x52, 0x37, 0xbe, 0xbb, 0x16, 0xb3, 0x98, 0xe9, 0x60, 0xa0, 0xac, 0x71, 0xde, - 0xdd, 0x88, 0x19, 0x8b, 0x7b, 0x10, 0xd0, 0x3c, 0x09, 0x68, 0x96, 0x31, 0x49, 0x65, 0xc2, 0x32, - 0x51, 0x64, 0xf7, 0xba, 0xd7, 0x85, 0x9f, 0x30, 0x95, 0x4d, 0x69, 0xbb, 0x93, 0x64, 0xc0, 0x87, - 0x41, 0xde, 0x8d, 0x55, 0x40, 0x04, 0x29, 0x48, 0x1a, 0x0c, 0x76, 0x82, 0x18, 0x32, 0xe0, 0x54, - 0x42, 0x54, 0x54, 0x1d, 0xc4, 0x89, 0xec, 0xf4, 0x8f, 0xfc, 0x36, 0x4b, 0x03, 0xca, 0x75, 0xd3, - 0xd7, 0xda, 0x28, 0x4b, 0x2d, 0xee, 0x60, 0x87, 0xf6, 0xf2, 0x0e, 0xad, 0x88, 0x78, 0x7b, 0x78, - 0xfd, 0x69, 0xf1, 0xa3, 0x03, 0x0e, 0x54, 0x42, 0x08, 0x22, 0x67, 0x99, 0x00, 0xe2, 0xe2, 0x3a, - 0x2f, 0x6c, 0x07, 0x6d, 0xa2, 0xad, 0x46, 0x68, 0x7d, 0x4f, 0xe0, 0x35, 0x53, 0x75, 0x2f, 0x11, - 0xd2, 0xd6, 0xbc, 0xc0, 0x0d, 0xd3, 0x52, 0x38, 0x68, 0x73, 0x71, 0xeb, 0xcc, 0xee, 0x4d, 0xbf, - 0xc4, 0xf4, 0x0d, 0xa6, 0x36, 0xfc, 0xbc, 0x1b, 0xfb, 0x0a, 0xd3, 0xb7, 0x87, 0x69, 0x30, 0x7d, - 0xa3, 0x1e, 0x96, 0x7a, 0xde, 0x33, 0x7c, 0xde, 0x86, 0x4d, 0x43, 0x82, 0x97, 0x1e, 0xd0, 0xd4, - 0x00, 0x6a, 0x9b, 0xac, 0xe3, 0x5a, 0x4b, 0x52, 0xd9, 0x17, 0xce, 0x82, 0x8e, 0x16, 0x1e, 0x71, - 0xf0, 0xca, 0x7d, 0x10, 0x82, 0xc6, 0xe0, 0x2c, 0xea, 0x84, 0x71, 0xbd, 0x21, 0x3e, 0x6b, 0x94, - 0x1f, 0xf5, 0x81, 0x0f, 0x95, 0x6c, 0x36, 0x21, 0xab, 0x6c, 0xb2, 0x81, 0x1b, 0x4a, 0x5e, 0xe4, - 0xb4, 0x0d, 0x85, 0x72, 0x19, 0x50, 0xa7, 0xd5, 0x92, 0x94, 0xcb, 0xbb, 0xd1, 0x1b, 0xad, 0xbe, - 0x1c, 0x5a, 0x5f, 0xe5, 0x1e, 0xd2, 0x18, 0x5a, 0xc9, 0x09, 0x38, 0x4b, 0xe3, 0x9c, 0xf1, 0xbd, - 0x8f, 0x08, 0xaf, 0x9a, 0xde, 0x4f, 0xf2, 0x88, 0x4a, 0x18, 0x13, 0x3c, 0xc7, 0x76, 0xa4, 0x34, - 0xc5, 0xcc, 0x07, 0x69, 0xe5, 0x14, 0x4e, 0x0a, 0x29, 0x4b, 0x4e, 0x20, 0xd2, 0xcf, 0x51, 0x0f, - 0xad, 0xbf, 0xfb, 0x03, 0xe3, 0x73, 0xa6, 0xa4, 0x05, 0x7c, 0x90, 0xb4, 0x81, 0x7c, 0x41, 0xb8, - 0x36, 0x9e, 0x0d, 0x32, 0x1b, 0x83, 0x3b, 0x5b, 0xb9, 0xb7, 0xf1, 0xe1, 0xf7, 0xdf, 0x4f, 0x0b, - 0xeb, 0xde, 0x05, 0xbd, 0x46, 0x83, 0x1d, 0x3b, 0xde, 0x62, 0x1f, 0x6d, 0x93, 0xf7, 0x78, 0xf1, - 0x0e, 0x48, 0x72, 0xb1, 0x54, 0xf8, 0xef, 0x3f, 0x9d, 0xb5, 0xf9, 0xa6, 0x6e, 0xee, 0x12, 0xa7, - 0xd2, 0x3c, 0x78, 0xab, 0xe6, 0xe3, 0x1d, 0x39, 0xc4, 0x4b, 0x6a, 0x19, 0xa6, 0x13, 0x34, 0xab, - 0x89, 0xc9, 0xed, 0xf1, 0x2e, 0xe9, 0x16, 0xab, 0xa4, 0xfa, 0x7c, 0xe4, 0x25, 0xae, 0xdd, 0x86, - 0x1e, 0x48, 0x98, 0xae, 0xee, 0x56, 0x13, 0x56, 0xb9, 0x80, 0xdf, 0x9e, 0x0e, 0xff, 0x19, 0xe1, - 0xe5, 0x10, 0x24, 0x1f, 0x92, 
0xcb, 0x55, 0x9d, 0x89, 0xc1, 0x9c, 0xf5, 0x18, 0x6f, 0x68, 0x92, - 0x6b, 0xee, 0xf6, 0x29, 0x24, 0xb6, 0x4c, 0xbd, 0xf1, 0x22, 0x2a, 0xa9, 0xaf, 0xd9, 0xf6, 0xcb, - 0xd1, 0xfd, 0x86, 0x70, 0x3d, 0x04, 0xd1, 0x3f, 0x4a, 0x13, 0x39, 0xd7, 0xa0, 0x6a, 0x67, 0x14, - 0x68, 0x0a, 0x73, 0x8d, 0xf9, 0x15, 0xe1, 0x95, 0x56, 0x5f, 0xe4, 0x90, 0x45, 0x73, 0xcd, 0xf9, - 0x1d, 0xe1, 0xc6, 0x63, 0xe0, 0x69, 0x92, 0xa9, 0xb7, 0xd0, 0x1c, 0x93, 0xde, 0xda, 0xff, 0x39, - 0x6a, 0xa2, 0x5f, 0xa3, 0x26, 0xfa, 0x33, 0x6a, 0xa2, 0xc3, 0x2b, 0x53, 0xaf, 0xe8, 0x53, 0xbe, - 0x29, 0x8e, 0x6a, 0xfa, 0x4a, 0xbe, 0xfa, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe8, 0xf5, 0x0b, 0x8f, - 0x71, 0x08, 0x00, 0x00, + // 783 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x96, 0xcf, 0x6b, 0x13, 0x41, + 0x14, 0xc7, 0x99, 0xb6, 0xa6, 0xcd, 0xb4, 0x22, 0x8e, 0x5a, 0x42, 0x48, 0x7f, 0xb0, 0x20, 0x48, + 0x29, 0x3b, 0x4d, 0x5b, 0x50, 0x0a, 0x1e, 0xb4, 0x4a, 0x05, 0xa3, 0x96, 0x4d, 0x45, 0xaa, 0xa7, + 0x6d, 0xf2, 0xdc, 0xae, 0xc9, 0xee, 0xac, 0x3b, 0x93, 0x94, 0x12, 0x72, 0xd0, 0x83, 0x78, 0x52, + 0xd0, 0x8b, 0xde, 0xfc, 0x75, 0xf0, 0x0f, 0xf0, 0x8f, 0xf0, 0xa6, 0xe0, 0x3f, 0x20, 0xc5, 0x3f, + 0x44, 0x66, 0xb2, 0x3f, 0x9b, 0xb4, 0xa4, 0xa4, 0x87, 0x9c, 0x76, 0x76, 0xde, 0xbe, 0xf7, 0x3e, + 0xfb, 0xe6, 0x3b, 0x6f, 0x06, 0x6b, 0x15, 0xa7, 0x4a, 0x39, 0xf8, 0x4d, 0xf0, 0xe9, 0x1e, 0xf3, + 0x6b, 0x4f, 0xeb, 0x6c, 0x2f, 0x1a, 0xe8, 0x9e, 0xcf, 0x04, 0x23, 0x13, 0xe1, 0x7b, 0xfe, 0xa2, + 0xc5, 0x2c, 0xa6, 0x26, 0xa9, 0x1c, 0x75, 0xec, 0xf9, 0x82, 0xc5, 0x98, 0x55, 0x07, 0x6a, 0x7a, + 0x36, 0x35, 0x5d, 0x97, 0x09, 0x53, 0xd8, 0xcc, 0xe5, 0x81, 0x75, 0xb5, 0x76, 0x8d, 0xeb, 0x36, + 0x93, 0x56, 0xc7, 0xac, 0xec, 0xda, 0x2e, 0xf8, 0xfb, 0xd4, 0xab, 0x59, 0x72, 0x82, 0x53, 0x07, + 0x84, 0x49, 0x9b, 0x45, 0x6a, 0x81, 0x0b, 0xbe, 0x29, 0xa0, 0x1a, 0x78, 0xad, 0x5b, 0xb6, 0xd8, + 0x6d, 0xec, 0xe8, 0x15, 0xe6, 0x50, 0xd3, 0x57, 0x49, 0x9f, 0xa9, 0x41, 0xec, 0x1a, 0xe1, 0x36, + 0x8b, 0x66, 0xdd, 0xdb, 0x35, 0xbb, 0x82, 0x68, 0xbf, 0x10, 0xbe, 0xf4, 0x28, 0xf8, 0x6a, 0xdd, + 0x07, 0x53, 0x80, 0x01, 0xcf, 0x1b, 0xc0, 0x05, 0x79, 0x82, 0xb3, 0xa1, 0x81, 0xe7, 0xd0, 0x3c, + 0xba, 0x32, 0xb9, 0x7c, 0x5d, 0x8f, 0x53, 0xea, 0x61, 0x4a, 0x35, 0xd0, 0xbd, 0x9a, 0xa5, 0xcb, + 0x94, 0x7a, 0x54, 0x98, 0x30, 0xa5, 0x1e, 0x46, 0x31, 0xe2, 0x78, 0x64, 0x1b, 0x9f, 0xed, 0x64, + 0x7b, 0xe0, 0xa9, 0x42, 0xe4, 0x46, 0x54, 0x82, 0x15, 0xbd, 0x53, 0x09, 0x3d, 0x59, 0x89, 0x38, + 0xb6, 0xac, 0x84, 0xde, 0x2c, 0xea, 0x29, 0x57, 0x23, 0x1d, 0x49, 0xfb, 0x8e, 0x30, 0x09, 0x13, + 0x6d, 0x80, 0x08, 0x7f, 0x47, 0xc3, 0x53, 0xe1, 0xec, 0x7d, 0xd3, 0x01, 0xf5, 0x47, 0x59, 0x23, + 0x35, 0x47, 0x0a, 0x38, 0x2b, 0x9f, 0xdc, 0x33, 0x2b, 0xa0, 0x88, 0xb2, 0x46, 0x3c, 0x41, 0x36, + 0x31, 0xde, 0x00, 0x11, 0x02, 0x8f, 0x2a, 0xe0, 0xa5, 0xfe, 0x80, 0x63, 0x3f, 0x23, 0x11, 0x43, + 0x7b, 0x8d, 0xf0, 0x85, 0x10, 0xa0, 0x64, 0xf3, 0x88, 0x35, 0xc5, 0x81, 0x0e, 0x73, 0x94, 0xf1, + 0xa4, 0xfc, 0x38, 0x5d, 0xb9, 0x62, 0x7f, 0x20, 0x09, 0x47, 0x23, 0x19, 0x45, 0xdb, 0x8e, 0x65, + 0xf0, 0xd0, 0xab, 0x26, 0x64, 0x30, 0x70, 0xdd, 0xb4, 0x37, 0x89, 0x05, 0x29, 0x31, 0xeb, 0xf4, + 0x16, 0x24, 0x87, 0xc7, 0x37, 0x59, 0x55, 0x39, 0x8f, 0x2a, 0x5b, 0xf8, 0x2a, 0xfd, 0xd6, 0x99, + 0x2b, 0x4c, 0x59, 0x85, 0xdc, 0x58, 0xc7, 0x2f, 0x9a, 0xd0, 0x7e, 0x24, 0x34, 0x7f, 0x0b, 0xea, + 0x70, 0x8a, 0x3f, 0x2b, 0x85, 0xdd, 0x09, 0x99, 0xd6, 0x49, 0x9f, 0xc2, 0x4e, 0xb9, 0x1a, 0xe9, + 0x48, 0x5a, 0x0e, 0x4f, 0x1f, 0xa6, 0xe6, 0x1e, 0x73, 0x39, 0x68, 0x2e, 0x9e, 
0x28, 0x31, 0xeb, + 0xb6, 0x2b, 0xfc, 0x7d, 0x59, 0x94, 0x0a, 0x73, 0x05, 0xb8, 0x22, 0xa0, 0x0f, 0x5f, 0xc9, 0x1d, + 0x9c, 0x15, 0xb6, 0x03, 0x65, 0x61, 0x3a, 0x5e, 0xa0, 0x9a, 0x85, 0xfe, 0xb0, 0xb6, 0x6c, 0x07, + 0x8c, 0xd8, 0x79, 0xf9, 0xe3, 0x14, 0x3e, 0x17, 0xa2, 0x94, 0xc1, 0x6f, 0xda, 0x15, 0x20, 0xaf, + 0x10, 0xce, 0x74, 0x36, 0x22, 0x99, 0x8b, 0x9b, 0x40, 0xcf, 0xd6, 0x92, 0x1f, 0xac, 0x8f, 0x68, + 0x85, 0x97, 0x7f, 0xfe, 0xbd, 0x1f, 0x99, 0xd6, 0xce, 0xab, 0x76, 0xda, 0x2c, 0x46, 0x6d, 0x8e, + 0xaf, 0xa1, 0x05, 0xf2, 0x01, 0xe1, 0xd1, 0x0d, 0x10, 0xa4, 0xd0, 0x4d, 0x11, 0xb7, 0x83, 0x41, + 0x11, 0x56, 0x15, 0x82, 0x4e, 0x16, 0xbb, 0x10, 0x68, 0x2b, 0x12, 0x43, 0x9b, 0xb6, 0x92, 0xca, + 0x69, 0x93, 0xb7, 0x08, 0x8f, 0xc9, 0x4d, 0x47, 0x66, 0xba, 0xd9, 0x12, 0xfb, 0x3f, 0x7f, 0x63, + 0x20, 0x38, 0x19, 0x49, 0xbb, 0xac, 0x00, 0xe7, 0xc8, 0xcc, 0xb1, 0x80, 0xe4, 0x05, 0xc2, 0x99, + 0x8e, 0x98, 0x7a, 0xad, 0x5a, 0x6a, 0x73, 0xe4, 0xe7, 0x8f, 0xfe, 0x20, 0xd0, 0x61, 0x50, 0x95, + 0x85, 0x93, 0x55, 0xe5, 0x13, 0xc2, 0x67, 0x0c, 0x90, 0xda, 0xed, 0x81, 0x90, 0x6a, 0x46, 0x83, + 0xae, 0xda, 0x55, 0xc5, 0x57, 0xcc, 0x9f, 0x88, 0x4f, 0x6a, 0xea, 0x2b, 0xc2, 0x13, 0x06, 0xf0, + 0xc6, 0x8e, 0x63, 0x8b, 0xe1, 0xa5, 0xfc, 0x8c, 0x70, 0x46, 0x52, 0x3a, 0x30, 0xbc, 0x8c, 0x5f, + 0x10, 0x1e, 0x2f, 0x37, 0xb8, 0x07, 0x6e, 0x75, 0x78, 0x21, 0xbf, 0x21, 0x9c, 0xdd, 0x02, 0xdf, + 0xb1, 0xdd, 0x23, 0xda, 0xd9, 0x70, 0x60, 0xbe, 0x43, 0xea, 0x00, 0x2c, 0x31, 0x8b, 0xf7, 0xea, + 0x76, 0xf1, 0x59, 0x9b, 0x27, 0xb1, 0x35, 0x3c, 0x28, 0xb4, 0xb2, 0x4a, 0x7b, 0x8f, 0xdc, 0x3d, + 0x9c, 0xf6, 0x98, 0xac, 0xd4, 0x63, 0x55, 0x4e, 0x5b, 0xc1, 0x31, 0xdb, 0xa6, 0x75, 0x66, 0x71, + 0xda, 0x8a, 0x4e, 0xd6, 0xf6, 0x12, 0xba, 0xb9, 0xf6, 0xf3, 0x60, 0x16, 0xfd, 0x3e, 0x98, 0x45, + 0x7f, 0x0f, 0x66, 0xd1, 0xe3, 0xc5, 0x23, 0xef, 0xa8, 0x3d, 0x2e, 0xd5, 0x3b, 0x19, 0x75, 0x27, + 0x5d, 0xf9, 0x1f, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x30, 0x03, 0x3e, 0x72, 0x0b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -380,15 +568,17 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type WorkflowServiceClient interface { - Create(ctx context.Context, in *v1alpha1.Workflow, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) - Get(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) - List(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowListResponse, error) - Delete(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowResponse, error) - Retry(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) - Resubmit(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) - Resume(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) - Suspend(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) - Terminate(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + Create(ctx context.Context, in *WorkflowCreateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + Get(ctx context.Context, in *WorkflowGetRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + List(ctx context.Context, in *WorkflowListRequest, opts ...grpc.CallOption) (*v1alpha1.WorkflowList, error) + Delete(ctx context.Context, in *WorkflowDeleteRequest, opts ...grpc.CallOption) (*WorkflowDeleteResponse, error) + Retry(ctx context.Context, in *WorkflowUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + Resubmit(ctx context.Context, in *WorkflowUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + Resume(ctx context.Context, in *WorkflowUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + Suspend(ctx context.Context, in *WorkflowUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + Terminate(ctx context.Context, in *WorkflowUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + // PodLogs returns stream of log entries for the specified pod. Pod + PodLogs(ctx context.Context, in *WorkflowLogRequest, opts ...grpc.CallOption) (WorkflowService_PodLogsClient, error) } type workflowServiceClient struct { @@ -399,7 +589,7 @@ func NewWorkflowServiceClient(cc *grpc.ClientConn) WorkflowServiceClient { return &workflowServiceClient{cc} } -func (c *workflowServiceClient) Create(ctx context.Context, in *v1alpha1.Workflow, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { +func (c *workflowServiceClient) Create(ctx context.Context, in *WorkflowCreateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { out := new(v1alpha1.Workflow) err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Create", in, out, opts...) if err != nil { @@ -408,7 +598,7 @@ func (c *workflowServiceClient) Create(ctx context.Context, in *v1alpha1.Workflo return out, nil } -func (c *workflowServiceClient) Get(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { +func (c *workflowServiceClient) Get(ctx context.Context, in *WorkflowGetRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { out := new(v1alpha1.Workflow) err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Get", in, out, opts...) 
if err != nil { @@ -417,8 +607,8 @@ func (c *workflowServiceClient) Get(ctx context.Context, in *WorkflowQuery, opts return out, nil } -func (c *workflowServiceClient) List(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowListResponse, error) { - out := new(WorkflowListResponse) +func (c *workflowServiceClient) List(ctx context.Context, in *WorkflowListRequest, opts ...grpc.CallOption) (*v1alpha1.WorkflowList, error) { + out := new(v1alpha1.WorkflowList) err := c.cc.Invoke(ctx, "/workflow.WorkflowService/List", in, out, opts...) if err != nil { return nil, err @@ -426,8 +616,8 @@ func (c *workflowServiceClient) List(ctx context.Context, in *WorkflowQuery, opt return out, nil } -func (c *workflowServiceClient) Delete(ctx context.Context, in *WorkflowQuery, opts ...grpc.CallOption) (*WorkflowResponse, error) { - out := new(WorkflowResponse) +func (c *workflowServiceClient) Delete(ctx context.Context, in *WorkflowDeleteRequest, opts ...grpc.CallOption) (*WorkflowDeleteResponse, error) { + out := new(WorkflowDeleteResponse) err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Delete", in, out, opts...) if err != nil { return nil, err @@ -435,7 +625,7 @@ func (c *workflowServiceClient) Delete(ctx context.Context, in *WorkflowQuery, o return out, nil } -func (c *workflowServiceClient) Retry(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { +func (c *workflowServiceClient) Retry(ctx context.Context, in *WorkflowUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { out := new(v1alpha1.Workflow) err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Retry", in, out, opts...) if err != nil { @@ -444,7 +634,7 @@ func (c *workflowServiceClient) Retry(ctx context.Context, in *WorkflowUpdateQue return out, nil } -func (c *workflowServiceClient) Resubmit(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { +func (c *workflowServiceClient) Resubmit(ctx context.Context, in *WorkflowUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { out := new(v1alpha1.Workflow) err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Resubmit", in, out, opts...) if err != nil { @@ -453,7 +643,7 @@ func (c *workflowServiceClient) Resubmit(ctx context.Context, in *WorkflowUpdate return out, nil } -func (c *workflowServiceClient) Resume(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { +func (c *workflowServiceClient) Resume(ctx context.Context, in *WorkflowUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { out := new(v1alpha1.Workflow) err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Resume", in, out, opts...) if err != nil { @@ -462,7 +652,7 @@ func (c *workflowServiceClient) Resume(ctx context.Context, in *WorkflowUpdateQu return out, nil } -func (c *workflowServiceClient) Suspend(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { +func (c *workflowServiceClient) Suspend(ctx context.Context, in *WorkflowUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { out := new(v1alpha1.Workflow) err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Suspend", in, out, opts...) 
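// The PodLogs method defined below is server-streaming: the generated stub
// sends a single WorkflowLogRequest, closes the send side, and then Recv()
// yields LogEntry messages until io.EOF. A consumption sketch (illustrative;
// client and ctx are assumed from the example above):
//
//	logs, err := client.PodLogs(ctx, &WorkflowLogRequest{
//		WorkflowName: "my-wf",
//		Namespace:    "default",
//		PodName:      "my-wf-pod-1",
//	})
//	if err != nil {
//		// handle the RPC error
//	}
//	for {
//		entry, err := logs.Recv()
//		if err == io.EOF {
//			break // normal end of stream
//		}
//		if err != nil {
//			// handle the stream error
//		}
//		fmt.Println(entry.Content)
//	}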
if err != nil { @@ -471,7 +661,7 @@ func (c *workflowServiceClient) Suspend(ctx context.Context, in *WorkflowUpdateQ return out, nil } -func (c *workflowServiceClient) Terminate(ctx context.Context, in *WorkflowUpdateQuery, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { +func (c *workflowServiceClient) Terminate(ctx context.Context, in *WorkflowUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { out := new(v1alpha1.Workflow) err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Terminate", in, out, opts...) if err != nil { @@ -480,17 +670,86 @@ func (c *workflowServiceClient) Terminate(ctx context.Context, in *WorkflowUpdat return out, nil } +func (c *workflowServiceClient) PodLogs(ctx context.Context, in *WorkflowLogRequest, opts ...grpc.CallOption) (WorkflowService_PodLogsClient, error) { + stream, err := c.cc.NewStream(ctx, &_WorkflowService_serviceDesc.Streams[0], "/workflow.WorkflowService/PodLogs", opts...) + if err != nil { + return nil, err + } + x := &workflowServicePodLogsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type WorkflowService_PodLogsClient interface { + Recv() (*LogEntry, error) + grpc.ClientStream +} + +type workflowServicePodLogsClient struct { + grpc.ClientStream +} + +func (x *workflowServicePodLogsClient) Recv() (*LogEntry, error) { + m := new(LogEntry) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // WorkflowServiceServer is the server API for WorkflowService service. type WorkflowServiceServer interface { - Create(context.Context, *v1alpha1.Workflow) (*v1alpha1.Workflow, error) - Get(context.Context, *WorkflowQuery) (*v1alpha1.Workflow, error) - List(context.Context, *WorkflowQuery) (*WorkflowListResponse, error) - Delete(context.Context, *WorkflowQuery) (*WorkflowResponse, error) - Retry(context.Context, *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) - Resubmit(context.Context, *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) - Resume(context.Context, *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) - Suspend(context.Context, *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) - Terminate(context.Context, *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) + Create(context.Context, *WorkflowCreateRequest) (*v1alpha1.Workflow, error) + Get(context.Context, *WorkflowGetRequest) (*v1alpha1.Workflow, error) + List(context.Context, *WorkflowListRequest) (*v1alpha1.WorkflowList, error) + Delete(context.Context, *WorkflowDeleteRequest) (*WorkflowDeleteResponse, error) + Retry(context.Context, *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) + Resubmit(context.Context, *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) + Resume(context.Context, *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) + Suspend(context.Context, *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) + Terminate(context.Context, *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) + // PodLogs returns a stream of log entries for the specified pod. + PodLogs(*WorkflowLogRequest, WorkflowService_PodLogsServer) error +} + +// UnimplementedWorkflowServiceServer can be embedded to have forward compatible implementations.
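+//
+// A server-side sketch (illustrative): embed the struct below so that RPCs
+// added to the service later fail gracefully instead of breaking the build,
+// and override only the methods actually implemented:
+//
+//	type myServer struct {
+//		UnimplementedWorkflowServiceServer
+//	}
+//
+//	func (s *myServer) PodLogs(req *WorkflowLogRequest, srv WorkflowService_PodLogsServer) error {
+//		return srv.Send(&LogEntry{Content: "log line for " + req.PodName})
+//	}
+//
+//	// RegisterWorkflowServiceServer(grpcServer, &myServer{}) wires it up.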
+type UnimplementedWorkflowServiceServer struct { +} + +func (*UnimplementedWorkflowServiceServer) Create(ctx context.Context, req *WorkflowCreateRequest) (*v1alpha1.Workflow, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (*UnimplementedWorkflowServiceServer) Get(ctx context.Context, req *WorkflowGetRequest) (*v1alpha1.Workflow, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedWorkflowServiceServer) List(ctx context.Context, req *WorkflowListRequest) (*v1alpha1.WorkflowList, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (*UnimplementedWorkflowServiceServer) Delete(ctx context.Context, req *WorkflowDeleteRequest) (*WorkflowDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (*UnimplementedWorkflowServiceServer) Retry(ctx context.Context, req *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + return nil, status.Errorf(codes.Unimplemented, "method Retry not implemented") +} +func (*UnimplementedWorkflowServiceServer) Resubmit(ctx context.Context, req *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + return nil, status.Errorf(codes.Unimplemented, "method Resubmit not implemented") +} +func (*UnimplementedWorkflowServiceServer) Resume(ctx context.Context, req *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + return nil, status.Errorf(codes.Unimplemented, "method Resume not implemented") +} +func (*UnimplementedWorkflowServiceServer) Suspend(ctx context.Context, req *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + return nil, status.Errorf(codes.Unimplemented, "method Suspend not implemented") +} +func (*UnimplementedWorkflowServiceServer) Terminate(ctx context.Context, req *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + return nil, status.Errorf(codes.Unimplemented, "method Terminate not implemented") +} +func (*UnimplementedWorkflowServiceServer) PodLogs(req *WorkflowLogRequest, srv WorkflowService_PodLogsServer) error { + return status.Errorf(codes.Unimplemented, "method PodLogs not implemented") } func RegisterWorkflowServiceServer(s *grpc.Server, srv WorkflowServiceServer) { @@ -498,7 +757,7 @@ func RegisterWorkflowServiceServer(s *grpc.Server, srv WorkflowServiceServer) { } func _WorkflowService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(v1alpha1.Workflow) + in := new(WorkflowCreateRequest) if err := dec(in); err != nil { return nil, err } @@ -510,13 +769,13 @@ func _WorkflowService_Create_Handler(srv interface{}, ctx context.Context, dec f FullMethod: "/workflow.WorkflowService/Create", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WorkflowServiceServer).Create(ctx, req.(*v1alpha1.Workflow)) + return srv.(WorkflowServiceServer).Create(ctx, req.(*WorkflowCreateRequest)) } return interceptor(ctx, in, info, handler) } func _WorkflowService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WorkflowQuery) + in := new(WorkflowGetRequest) if err := dec(in); err != nil { return nil, err } @@ -528,13 +787,13 @@ func _WorkflowService_Get_Handler(srv interface{}, ctx context.Context, dec func FullMethod: "/workflow.WorkflowService/Get", } handler := func(ctx context.Context, 
req interface{}) (interface{}, error) { - return srv.(WorkflowServiceServer).Get(ctx, req.(*WorkflowQuery)) + return srv.(WorkflowServiceServer).Get(ctx, req.(*WorkflowGetRequest)) } return interceptor(ctx, in, info, handler) } func _WorkflowService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WorkflowQuery) + in := new(WorkflowListRequest) if err := dec(in); err != nil { return nil, err } @@ -546,13 +805,13 @@ func _WorkflowService_List_Handler(srv interface{}, ctx context.Context, dec fun FullMethod: "/workflow.WorkflowService/List", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WorkflowServiceServer).List(ctx, req.(*WorkflowQuery)) + return srv.(WorkflowServiceServer).List(ctx, req.(*WorkflowListRequest)) } return interceptor(ctx, in, info, handler) } func _WorkflowService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WorkflowQuery) + in := new(WorkflowDeleteRequest) if err := dec(in); err != nil { return nil, err } @@ -564,13 +823,13 @@ func _WorkflowService_Delete_Handler(srv interface{}, ctx context.Context, dec f FullMethod: "/workflow.WorkflowService/Delete", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WorkflowServiceServer).Delete(ctx, req.(*WorkflowQuery)) + return srv.(WorkflowServiceServer).Delete(ctx, req.(*WorkflowDeleteRequest)) } return interceptor(ctx, in, info, handler) } func _WorkflowService_Retry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WorkflowUpdateQuery) + in := new(WorkflowUpdateRequest) if err := dec(in); err != nil { return nil, err } @@ -582,13 +841,13 @@ func _WorkflowService_Retry_Handler(srv interface{}, ctx context.Context, dec fu FullMethod: "/workflow.WorkflowService/Retry", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WorkflowServiceServer).Retry(ctx, req.(*WorkflowUpdateQuery)) + return srv.(WorkflowServiceServer).Retry(ctx, req.(*WorkflowUpdateRequest)) } return interceptor(ctx, in, info, handler) } func _WorkflowService_Resubmit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WorkflowUpdateQuery) + in := new(WorkflowUpdateRequest) if err := dec(in); err != nil { return nil, err } @@ -600,13 +859,13 @@ func _WorkflowService_Resubmit_Handler(srv interface{}, ctx context.Context, dec FullMethod: "/workflow.WorkflowService/Resubmit", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WorkflowServiceServer).Resubmit(ctx, req.(*WorkflowUpdateQuery)) + return srv.(WorkflowServiceServer).Resubmit(ctx, req.(*WorkflowUpdateRequest)) } return interceptor(ctx, in, info, handler) } func _WorkflowService_Resume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WorkflowUpdateQuery) + in := new(WorkflowUpdateRequest) if err := dec(in); err != nil { return nil, err } @@ -618,13 +877,13 @@ func _WorkflowService_Resume_Handler(srv interface{}, ctx context.Context, dec f FullMethod: "/workflow.WorkflowService/Resume", } handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { - return srv.(WorkflowServiceServer).Resume(ctx, req.(*WorkflowUpdateQuery)) + return srv.(WorkflowServiceServer).Resume(ctx, req.(*WorkflowUpdateRequest)) } return interceptor(ctx, in, info, handler) } func _WorkflowService_Suspend_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WorkflowUpdateQuery) + in := new(WorkflowUpdateRequest) if err := dec(in); err != nil { return nil, err } @@ -636,13 +895,13 @@ func _WorkflowService_Suspend_Handler(srv interface{}, ctx context.Context, dec FullMethod: "/workflow.WorkflowService/Suspend", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WorkflowServiceServer).Suspend(ctx, req.(*WorkflowUpdateQuery)) + return srv.(WorkflowServiceServer).Suspend(ctx, req.(*WorkflowUpdateRequest)) } return interceptor(ctx, in, info, handler) } func _WorkflowService_Terminate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WorkflowUpdateQuery) + in := new(WorkflowUpdateRequest) if err := dec(in); err != nil { return nil, err } @@ -654,11 +913,32 @@ func _WorkflowService_Terminate_Handler(srv interface{}, ctx context.Context, de FullMethod: "/workflow.WorkflowService/Terminate", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WorkflowServiceServer).Terminate(ctx, req.(*WorkflowUpdateQuery)) + return srv.(WorkflowServiceServer).Terminate(ctx, req.(*WorkflowUpdateRequest)) } return interceptor(ctx, in, info, handler) } +func _WorkflowService_PodLogs_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(WorkflowLogRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(WorkflowServiceServer).PodLogs(m, &workflowServicePodLogsServer{stream}) +} + +type WorkflowService_PodLogsServer interface { + Send(*LogEntry) error + grpc.ServerStream +} + +type workflowServicePodLogsServer struct { + grpc.ServerStream +} + +func (x *workflowServicePodLogsServer) Send(m *LogEntry) error { + return x.ServerStream.SendMsg(m) +} + var _WorkflowService_serviceDesc = grpc.ServiceDesc{ ServiceName: "workflow.WorkflowService", HandlerType: (*WorkflowServiceServer)(nil), @@ -700,229 +980,476 @@ var _WorkflowService_serviceDesc = grpc.ServiceDesc{ Handler: _WorkflowService_Terminate_Handler, }, }, - Streams: []grpc.StreamDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "PodLogs", + Handler: _WorkflowService_PodLogs_Handler, + ServerStreams: true, + }, + }, Metadata: "cmd/server/workflow/workflow.proto", } -func (m *WorkflowCreateResponse) Marshal() (dAtA []byte, err error) { +func (m *WorkflowCreateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *WorkflowCreateResponse) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *WorkflowCreateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Response) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Response))) - i += copy(dAtA[i:], m.Response) - } if 
m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.CreateOptions != nil { + { + size, err := m.CreateOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflow(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Workflows != nil { + { + size, err := m.Workflows.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflow(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *WorkflowListResponse) Marshal() (dAtA []byte, err error) { +func (m *WorkflowGetRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *WorkflowListResponse) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *WorkflowGetRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Workflows) > 0 { - for _, msg := range m.Workflows { - dAtA[i] = 0xa - i++ - i = encodeVarintWorkflow(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.GetOptions != nil { + { + size, err := m.GetOptions.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintWorkflow(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 } - return i, nil + if len(m.WorkflowName) > 0 { + i -= len(m.WorkflowName) + copy(dAtA[i:], m.WorkflowName) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.WorkflowName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *WorkflowResponse) Marshal() (dAtA []byte, err error) { +func (m *WorkflowListRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *WorkflowResponse) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *WorkflowListRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Status) > 0 { + if m.ListOptions != nil { + { + size, err := m.ListOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflow(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x12 - i++ - i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) } - if len(m.Message) > 0 { - dAtA[i] = 0x1a - i++ - i = 
encodeVarintWorkflow(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *WorkflowQuery) Marshal() (dAtA []byte, err error) { +func (m *WorkflowUpdateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *WorkflowQuery) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *WorkflowUpdateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } if len(m.Namespace) > 0 { - dAtA[i] = 0x12 - i++ + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - } - if m.StartIdx != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintWorkflow(dAtA, i, uint64(m.StartIdx)) - } - if m.PageSize != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintWorkflow(dAtA, i, uint64(m.PageSize)) + i-- + dAtA[i] = 0x12 } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if len(m.WorkflowName) > 0 { + i -= len(m.WorkflowName) + copy(dAtA[i:], m.WorkflowName) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.WorkflowName))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } -func (m *WorkflowUpdateQuery) Marshal() (dAtA []byte, err error) { +func (m *WorkflowLogRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *WorkflowUpdateQuery) MarshalTo(dAtA []byte) (int, error) { - var i int +func (m *WorkflowLogRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowLogRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Workflow != nil { + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Container) > 0 { + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x22 + } + if len(m.PodName) > 0 { + i -= len(m.PodName) + copy(dAtA[i:], m.PodName) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.PodName))) + i-- + dAtA[i] = 0x1a + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.WorkflowName) > 0 { + i -= len(m.WorkflowName) + copy(dAtA[i:], m.WorkflowName) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.WorkflowName))) + i-- dAtA[i] = 0xa - i++ - i = encodeVarintWorkflow(dAtA, i, uint64(m.Workflow.Size())) - n1, err := 
m.Workflow.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.Memoized { - dAtA[i] = 0x10 - i++ - if m.Memoized { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + return len(dAtA) - i, nil +} + +func (m *WorkflowDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return i, nil + return dAtA[:n], nil } -func encodeVarintWorkflow(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { +func (m *WorkflowDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.DeleteOptions != nil { + { + size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflow(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.WorkflowName) > 0 { + i -= len(m.WorkflowName) + copy(dAtA[i:], m.WorkflowName) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.WorkflowName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowDeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *LogEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LogEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.TimeStamp != nil { + { + size, err := m.TimeStamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflow(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Content) > 0 { + i -= len(m.Content) + copy(dAtA[i:], m.Content) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Content))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintWorkflow(dAtA []byte, offset int, v uint64) int { + offset -= sovWorkflow(v) + base := offset + for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } -func (m *WorkflowCreateResponse) Size() (n int) { +func (m 
*WorkflowCreateRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Response) + if m.Workflows != nil { + l = m.Workflows.Size() + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.CreateOptions != nil { + l = m.CreateOptions.Size() + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WorkflowGetRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.WorkflowName) if l > 0 { n += 1 + l + sovWorkflow(uint64(l)) } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.GetOptions != nil { + l = m.GetOptions.Size() + n += 1 + l + sovWorkflow(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } -func (m *WorkflowListResponse) Size() (n int) { +func (m *WorkflowListRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Workflows) > 0 { - for _, e := range m.Workflows { - l = e.Size() - n += 1 + l + sovWorkflow(uint64(l)) - } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.ListOptions != nil { + l = m.ListOptions.Size() + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WorkflowUpdateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.WorkflowName) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -930,21 +1457,25 @@ func (m *WorkflowListResponse) Size() (n int) { return n } -func (m *WorkflowResponse) Size() (n int) { +func (m *WorkflowLogRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + l = len(m.WorkflowName) if l > 0 { n += 1 + l + sovWorkflow(uint64(l)) } - l = len(m.Status) + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + l = len(m.PodName) if l > 0 { n += 1 + l + sovWorkflow(uint64(l)) } - l = len(m.Message) + l = len(m.Container) if l > 0 { n += 1 + l + sovWorkflow(uint64(l)) } @@ -954,13 +1485,13 @@ func (m *WorkflowResponse) Size() (n int) { return n } -func (m *WorkflowQuery) Size() (n int) { +func (m *WorkflowDeleteRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + l = len(m.WorkflowName) if l > 0 { n += 1 + l + sovWorkflow(uint64(l)) } @@ -968,51 +1499,335 @@ func (m *WorkflowQuery) Size() (n int) { if l > 0 { n += 1 + l + sovWorkflow(uint64(l)) } - if m.StartIdx != 0 { - n += 1 + sovWorkflow(uint64(m.StartIdx)) + if m.DeleteOptions != nil { + l = m.DeleteOptions.Size() + n += 1 + l + sovWorkflow(uint64(l)) } - if m.PageSize != 0 { - n += 1 + sovWorkflow(uint64(m.PageSize)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) } + return n +} + +func (m *WorkflowDeleteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } -func (m *WorkflowUpdateQuery) Size() (n int) { +func (m *LogEntry) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Workflow != nil { - l = m.Workflow.Size() + l = len(m.Content) + if l > 0 { n += 1 + l + sovWorkflow(uint64(l)) } - if m.Memoized { - n += 2 + if m.TimeStamp != nil { + l = m.TimeStamp.Size() + n += 1 + l + sovWorkflow(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } - return n -} + 
return n +} + +func sovWorkflow(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWorkflow(x uint64) (n int) { + return sovWorkflow(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *WorkflowCreateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowCreateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Workflows == nil { + m.Workflows = &v1alpha1.Workflow{} + } + if err := m.Workflows.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreateOptions == nil { + m.CreateOptions = &v1.CreateOptions{} + } + if err := m.CreateOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
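+			// Wire-format note (sketch): every field begins with a varint tag,
+			// tag = (fieldNum << 3) | wireType. The 0xa written by
+			// MarshalToSizedBuffer above is exactly (1 << 3) | 2: field 1,
+			// wire type 2 (length-delimited). sovWorkflow(x) is the byte count
+			// of x as a varint, ceil(bits/7), hence (Len64(x|1)+6)/7. Fields
+			// with numbers this message does not know about fall through to
+			// this default case and are stepped over with skipWorkflow: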
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkflowName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetOptions == nil { + m.GetOptions = &v1.GetOptions{} + } + if err := m.GetOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
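+			// Round-trip note (sketch): the skipped bytes appended to
+			// XXX_unrecognized above are re-emitted verbatim by
+			// MarshalToSizedBuffer (first in code, hence last in the encoded
+			// buffer, since marshaling runs back-to-front), so fields from a
+			// newer schema survive a decode/encode cycle unchanged.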
+ iNdEx += skippy + } + } -func sovWorkflow(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } + if iNdEx > l { + return io.ErrUnexpectedEOF } - return n -} -func sozWorkflow(x uint64) (n int) { - return sovWorkflow(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return nil } -func (m *WorkflowCreateResponse) Unmarshal(dAtA []byte) error { +func (m *WorkflowListRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1035,15 +1850,15 @@ func (m *WorkflowCreateResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WorkflowCreateResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowListRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowCreateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowListRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1071,7 +1886,43 @@ func (m *WorkflowCreateResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Response = string(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ListOptions == nil { + m.ListOptions = &v1.ListOptions{} + } + if err := m.ListOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -1098,7 +1949,7 @@ func (m *WorkflowCreateResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *WorkflowListResponse) Unmarshal(dAtA []byte) error { +func (m *WorkflowUpdateRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1121,17 +1972,17 @@ func (m *WorkflowListResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WorkflowListResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowUpdateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowWorkflow @@ -1141,25 +1992,55 @@ func (m *WorkflowListResponse) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthWorkflow } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthWorkflow } if postIndex > l { return io.ErrUnexpectedEOF } - m.Workflows = append(m.Workflows, &v1alpha1.Workflow{}) - if err := m.Workflows[len(m.Workflows)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.WorkflowName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1186,7 +2067,7 @@ func (m *WorkflowListResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *WorkflowResponse) Unmarshal(dAtA []byte) error { +func (m *WorkflowLogRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1209,15 +2090,15 @@ func (m *WorkflowResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WorkflowResponse: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowLogRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1245,11 +2126,11 @@ func (m *WorkflowResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.WorkflowName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1277,11 +2158,43 @@ func (m *WorkflowResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = string(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { 
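+				// Varint note (sketch): each byte carries 7 payload bits and
+				// the high bit flags a continuation. For example, the bytes
+				// 0xAC 0x02 decode as 0x2C | (0x02 << 7) = 300. b < 0x80 means
+				// the high bit is clear, so this is the final byte: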
+ break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1309,7 +2222,7 @@ func (m *WorkflowResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Container = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -1336,7 +2249,7 @@ func (m *WorkflowResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *WorkflowQuery) Unmarshal(dAtA []byte) error { +func (m *WorkflowDeleteRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1359,15 +2272,15 @@ func (m *WorkflowQuery) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WorkflowQuery: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowDeleteRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowQuery: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -1395,7 +2308,7 @@ func (m *WorkflowQuery) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.WorkflowName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -1430,10 +2343,10 @@ func (m *WorkflowQuery) Unmarshal(dAtA []byte) error { m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartIdx", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) } - m.StartIdx = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowWorkflow @@ -1443,30 +2356,82 @@ func (m *WorkflowQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.StartIdx |= int32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PageSize", wireType) + if msglen < 0 { + return ErrInvalidLengthWorkflow } - m.PageSize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorkflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PageSize |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteOptions == nil { + m.DeleteOptions = &v1.DeleteOptions{} + } + if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skipWorkflow(dAtA[iNdEx:]) @@ -1492,7 +2457,7 @@ func (m *WorkflowQuery) Unmarshal(dAtA []byte) error { } return nil } -func (m *WorkflowUpdateQuery) Unmarshal(dAtA []byte) error { +func (m *LogEntry) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1515,17 +2480,17 @@ func (m *WorkflowUpdateQuery) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: WorkflowUpdateQuery: wiretype end group for non-group") + return fmt.Errorf("proto: LogEntry: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowUpdateQuery: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: LogEntry: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowWorkflow @@ -1535,33 +2500,29 @@ func (m *WorkflowUpdateQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthWorkflow } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthWorkflow } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Workflow == nil { - m.Workflow = &v1alpha1.Workflow{} - } - if err := m.Workflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Content = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Memoized", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeStamp", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowWorkflow @@ -1571,12 +2532,28 @@ func (m *WorkflowUpdateQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Memoized = bool(v != 0) + if msglen < 0 
{ + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeStamp == nil { + m.TimeStamp = &v1.Time{} + } + if err := m.TimeStamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipWorkflow(dAtA[iNdEx:]) diff --git a/cmd/server/workflow/workflow.pb.gw.go b/cmd/server/workflow/workflow.pb.gw.go index 6aba99f04977..0dbd8ee811e5 100644 --- a/cmd/server/workflow/workflow.pb.gw.go +++ b/cmd/server/workflow/workflow.pb.gw.go @@ -13,7 +13,6 @@ import ( "io" "net/http" - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime" "github.com/grpc-ecosystem/grpc-gateway/utilities" @@ -30,7 +29,7 @@ var _ = runtime.String var _ = utilities.NewDoubleArray func request_WorkflowService_Create_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq v1alpha1.Workflow + var protoReq WorkflowCreateRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) @@ -46,12 +45,29 @@ func request_WorkflowService_Create_0(ctx context.Context, marshaler runtime.Mar } +func local_request_WorkflowService_Create_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowCreateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Create(ctx, &protoReq) + return msg, metadata, err + +} + var ( - filter_WorkflowService_Get_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} + filter_WorkflowService_Get_0 = &utilities.DoubleArray{Encoding: map[string]int{"Namespace": 0, "WorkflowName": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} ) func request_WorkflowService_Get_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq WorkflowQuery + var protoReq WorkflowGetRequest var metadata runtime.ServerMetadata var ( @@ -61,15 +77,26 @@ func request_WorkflowService_Get_0(ctx context.Context, marshaler runtime.Marsha _ = err ) - val, ok = pathParams["name"] + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + + val, ok = pathParams["WorkflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") } - protoReq.Name, err = runtime.String(val) + 
protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) } if err := req.ParseForm(); err != nil { @@ -84,14 +111,74 @@ func request_WorkflowService_Get_0(ctx context.Context, marshaler runtime.Marsha } +func local_request_WorkflowService_Get_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowGetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + } + + protoReq.WorkflowName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + } + + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WorkflowService_Get_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Get(ctx, &protoReq) + return msg, metadata, err + +} + var ( - filter_WorkflowService_List_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} + filter_WorkflowService_List_0 = &utilities.DoubleArray{Encoding: map[string]int{"Namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} ) func request_WorkflowService_List_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq WorkflowQuery + var protoReq WorkflowListRequest var metadata runtime.ServerMetadata + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -104,12 +191,43 @@ func request_WorkflowService_List_0(ctx context.Context, marshaler runtime.Marsh } +func local_request_WorkflowService_List_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowListRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if 
err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WorkflowService_List_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.List(ctx, &protoReq) + return msg, metadata, err + +} + var ( - filter_WorkflowService_Delete_0 = &utilities.DoubleArray{Encoding: map[string]int{"name": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} + filter_WorkflowService_Delete_0 = &utilities.DoubleArray{Encoding: map[string]int{"Namespace": 0, "WorkflowName": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} ) func request_WorkflowService_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq WorkflowQuery + var protoReq WorkflowDeleteRequest var metadata runtime.ServerMetadata var ( @@ -119,15 +237,26 @@ func request_WorkflowService_Delete_0(ctx context.Context, marshaler runtime.Mar _ = err ) - val, ok = pathParams["name"] + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + + val, ok = pathParams["WorkflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "name") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") } - protoReq.Name, err = runtime.String(val) + protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "name", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) } if err := req.ParseForm(); err != nil { @@ -142,19 +271,57 @@ func request_WorkflowService_Delete_0(ctx context.Context, marshaler runtime.Mar } -var ( - filter_WorkflowService_Retry_0 = &utilities.DoubleArray{Encoding: map[string]int{"workflow": 0, "metadata": 1, "name": 2}, Base: []int{1, 2, 1, 1, 0, 0}, Check: []int{0, 1, 2, 3, 4, 2}} -) +func local_request_WorkflowService_Delete_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowDeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + } + + protoReq.WorkflowName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) 
+ } + + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WorkflowService_Delete_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Delete(ctx, &protoReq) + return msg, metadata, err + +} func request_WorkflowService_Retry_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq WorkflowUpdateQuery + var protoReq WorkflowUpdateRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -165,22 +332,26 @@ func request_WorkflowService_Retry_0(ctx context.Context, marshaler runtime.Mars _ = err ) - val, ok = pathParams["workflow.metadata.name"] + val, ok = pathParams["Namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow.metadata.name") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") } - err = runtime.PopulateFieldFromPath(&protoReq, "workflow.metadata.name", val) + protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow.metadata.name", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) } - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Retry_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + + protoReq.WorkflowName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) } msg, err := client.Retry(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -188,19 +359,337 @@ func request_WorkflowService_Retry_0(ctx context.Context, marshaler runtime.Mars } -var ( - filter_WorkflowService_Resubmit_0 = &utilities.DoubleArray{Encoding: map[string]int{"workflow": 0, "metadata": 1, "name": 2}, Base: []int{1, 2, 1, 1, 0, 0}, Check: []int{0, 1, 2, 3, 4, 2}} -) +func local_request_WorkflowService_Retry_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowUpdateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + 
ok bool + err error + _ = err + ) + + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + } + + protoReq.WorkflowName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + } + + msg, err := server.Retry(ctx, &protoReq) + return msg, metadata, err + +} func request_WorkflowService_Resubmit_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq WorkflowUpdateQuery + var protoReq WorkflowUpdateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + } + + protoReq.WorkflowName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + } + + msg, err := client.Resubmit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_Resubmit_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowUpdateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter 
%s", "WorkflowName") + } + + protoReq.WorkflowName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + } + + msg, err := server.Resubmit(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_Resume_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowUpdateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + } + + protoReq.WorkflowName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + } + + msg, err := client.Resume(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_Resume_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowUpdateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + } + + protoReq.WorkflowName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + } + + msg, err := server.Resume(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_Suspend_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq 
WorkflowUpdateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + } + + protoReq.WorkflowName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + } + + msg, err := client.Suspend(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_Suspend_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowUpdateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + } + + protoReq.WorkflowName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + } + + msg, err := server.Suspend(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowService_Terminate_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowUpdateRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -211,42 +700,42 @@ func request_WorkflowService_Resubmit_0(ctx 
context.Context, marshaler runtime.M _ = err ) - val, ok = pathParams["workflow.metadata.name"] + val, ok = pathParams["Namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow.metadata.name") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") } - err = runtime.PopulateFieldFromPath(&protoReq, "workflow.metadata.name", val) + protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow.metadata.name", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) } - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Resubmit_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + + protoReq.WorkflowName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) } - msg, err := client.Resubmit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.Terminate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } -var ( - filter_WorkflowService_Resume_0 = &utilities.DoubleArray{Encoding: map[string]int{"workflow": 0, "metadata": 1, "name": 2}, Base: []int{1, 2, 1, 1, 0, 0}, Check: []int{0, 1, 2, 3, 4, 2}} -) - -func request_WorkflowService_Resume_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq WorkflowUpdateQuery +func local_request_WorkflowService_Terminate_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowUpdateRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) if berr != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF { + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -257,45 +746,37 @@ func request_WorkflowService_Resume_0(ctx context.Context, marshaler runtime.Mar _ = err ) - val, ok = pathParams["workflow.metadata.name"] + val, ok = pathParams["Namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow.metadata.name") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") } - err = runtime.PopulateFieldFromPath(&protoReq, "workflow.metadata.name", val) + protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow.metadata.name", err) + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) } - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Resume_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + + protoReq.WorkflowName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) } - msg, err := client.Resume(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := server.Terminate(ctx, &protoReq) return msg, metadata, err } -var ( - filter_WorkflowService_Suspend_0 = &utilities.DoubleArray{Encoding: map[string]int{"workflow": 0, "metadata": 1, "name": 2}, Base: []int{1, 2, 1, 1, 0, 0}, Check: []int{0, 1, 2, 3, 4, 2}} -) - -func request_WorkflowService_Suspend_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq WorkflowUpdateQuery +func request_WorkflowService_PodLogs_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (WorkflowService_PodLogsClient, runtime.ServerMetadata, error) { + var protoReq WorkflowLogRequest var metadata runtime.ServerMetadata - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - var ( val string ok bool @@ -303,73 +784,256 @@ func request_WorkflowService_Suspend_0(ctx context.Context, marshaler runtime.Ma _ = err ) - val, ok = pathParams["workflow.metadata.name"] + val, ok = pathParams["Namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow.metadata.name") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") } - err = runtime.PopulateFieldFromPath(&protoReq, "workflow.metadata.name", val) + protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow.metadata.name", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) } - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Suspend_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") } - msg, err := client.Suspend(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err + protoReq.WorkflowName, err = runtime.String(val) -} + if err != nil { + 
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + } -var ( - filter_WorkflowService_Terminate_0 = &utilities.DoubleArray{Encoding: map[string]int{"workflow": 0, "metadata": 1, "name": 2}, Base: []int{1, 2, 1, 1, 0, 0}, Check: []int{0, 1, 2, 3, 4, 2}} -) + val, ok = pathParams["PodName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "PodName") + } -func request_WorkflowService_Terminate_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq WorkflowUpdateQuery - var metadata runtime.ServerMetadata + protoReq.PodName, err = runtime.String(val) - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "PodName", err) } - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["workflow.metadata.name"] + val, ok = pathParams["Container"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflow.metadata.name") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Container") } - err = runtime.PopulateFieldFromPath(&protoReq, "workflow.metadata.name", val) + protoReq.Container, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflow.metadata.name", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Container", err) } - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + stream, err := client.PodLogs(ctx, &protoReq) + if err != nil { + return nil, metadata, err } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Terminate_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + header, err := stream.Header() + if err != nil { + return nil, metadata, err } + metadata.HeaderMD = header + return stream, metadata, nil - msg, err := client.Terminate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err +} + +// RegisterWorkflowServiceHandlerServer registers the http handlers for service WorkflowService to "mux". +// UnaryRPC :call WorkflowServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
+func RegisterWorkflowServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server WorkflowServiceServer) error { + + mux.Handle("POST", pattern_WorkflowService_Create_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_Create_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Create_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_Get_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowService_List_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_List_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_List_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_WorkflowService_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_Delete_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_WorkflowService_Retry_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_Retry_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Retry_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_WorkflowService_Resubmit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_Resubmit_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Resubmit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_WorkflowService_Resume_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_Resume_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Resume_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("PUT", pattern_WorkflowService_Suspend_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_Suspend_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Suspend_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PUT", pattern_WorkflowService_Terminate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_Terminate_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Terminate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + + mux.Handle("GET", pattern_WorkflowService_PodLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") + _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + }) + + return nil } // RegisterWorkflowServiceHandlerFromEndpoint is same as RegisterWorkflowServiceHandler but @@ -590,27 +1254,49 @@ func RegisterWorkflowServiceHandlerClient(ctx context.Context, mux *runtime.Serv }) + mux.Handle("GET", pattern_WorkflowService_PodLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_PodLogs_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_PodLogs_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
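+		// Note: forward_WorkflowService_PodLogs_0 is runtime.ForwardResponseStream (see the
+		// forwarder variables at the bottom of this file); it repeatedly invokes the Recv
+		// closure above and writes each received LogEntry to the HTTP response until the
+		// gRPC stream ends.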
+ + }) + return nil } var ( pattern_WorkflowService_Create_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "workflows"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "name"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_List_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "workflows"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_List_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "Namespace"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "name"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Retry_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "workflow.metadata.name"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Retry_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Resubmit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "workflow.metadata.name"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Resubmit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Resume_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "workflow.metadata.name"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Resume_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Suspend_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "workflow.metadata.name"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Suspend_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Terminate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "workflow.metadata.name"}, "", 
runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Terminate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_PodLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6, 2, 7, 1, 0, 4, 1, 5, 8}, []string{"api", "v1", "workflow", "Namespace", "WorkflowName", "pods", "PodName", "logs", "Container"}, "", runtime.AssumeColonVerbOpt(true))) ) var ( @@ -631,4 +1317,6 @@ var ( forward_WorkflowService_Suspend_0 = runtime.ForwardResponseMessage forward_WorkflowService_Terminate_0 = runtime.ForwardResponseMessage + + forward_WorkflowService_PodLogs_0 = runtime.ForwardResponseStream ) diff --git a/cmd/server/workflow/workflow.proto b/cmd/server/workflow/workflow.proto index 0f2a13492b9f..07c316896b93 100644 --- a/cmd/server/workflow/workflow.proto +++ b/cmd/server/workflow/workflow.proto @@ -12,97 +12,106 @@ import "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1/generated.proto"; // Workflow Service API performs CRUD actions against application resources package workflow; -message WorkflowCreateResponse{ - string response =1; - -} -message WorkflowListResponse{ - repeated github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow workflows =1; +message WorkflowCreateRequest{ + github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow Workflows =1; + k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions CreateOptions =2; } +message WorkflowGetRequest{ + string WorkflowName =1; + string Namespace = 2; + k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions GetOptions =3; +} -//message LogEntry { -// string content = 1 [(gogoproto.nullable) = false]; -// k8s.io.apimachinery.pkg.apis.meta.v1.Time timeStamp = 2 [(gogoproto.nullable) = false]; -//} +message WorkflowListRequest{ + string Namespace = 1; + k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions ListOptions =2; +} +message WorkflowUpdateRequest{ + string WorkflowName =1; + string Namespace = 2; +} -message WorkflowResponse{ - string Name = 1; - string Status = 2; - string Message = 3; +message WorkflowLogRequest{ + string WorkflowName =1; + string Namespace = 2; + string PodName = 3; + string Container = 4; } -message WorkflowQuery{ - string name = 1; + +message WorkflowDeleteRequest{ + string WorkflowName =1; string Namespace = 2; - int32 StartIdx = 3; - int32 PageSize = 4; + k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions DeleteOptions =3; } +message WorkflowDeleteResponse{} -message WorkflowUpdateQuery{ - github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow workflow = 1; - bool memoized = 2; +message LogEntry { + string content = 1; + k8s.io.apimachinery.pkg.apis.meta.v1.Time timeStamp = 2; } + service WorkflowService { - rpc Create(github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Create(WorkflowCreateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ option (google.api.http) = { post: "/api/v1/workflows" body: "*" }; } - rpc Get(WorkflowQuery) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ - option (google.api.http).get = "/api/v1/workflows/{name}"; + rpc Get(WorkflowGetRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + option (google.api.http).get = 
"/api/v1/workflows/{Namespace}/{WorkflowName}"; } - rpc List(WorkflowQuery) returns (WorkflowListResponse){ - option (google.api.http).get = "/api/v1/workflows"; + rpc List(WorkflowListRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowList){ + option (google.api.http).get = "/api/v1/workflows/{Namespace}"; } - rpc Delete(WorkflowQuery) returns (WorkflowResponse){ - option (google.api.http).delete = "/api/v1/workflows/{name}"; + rpc Delete(WorkflowDeleteRequest) returns (WorkflowDeleteResponse){ + option (google.api.http).delete = "/api/v1/workflows/{Namespace}/{WorkflowName}"; } - rpc Retry(WorkflowUpdateQuery) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Retry(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ option (google.api.http) = { - put: "/api/v1/workflows/{workflow.metadata.name}" - body: "workflow" + put: "/api/v1/workflows/{Namespace}/{WorkflowName}" + body: "*" }; } - rpc Resubmit(WorkflowUpdateQuery) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Resubmit(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ option (google.api.http) = { - put: "/api/v1/workflows/{workflow.metadata.name}" - body: "workflow" + put: "/api/v1/workflows/{Namespace}/{WorkflowName}" + body: "*" }; } - rpc Resume(WorkflowUpdateQuery) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Resume(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ option (google.api.http) = { - put: "/api/v1/workflows/{workflow.metadata.name}" - body: "workflow" + put: "/api/v1/workflows/{Namespace}/{WorkflowName}" + body: "*" }; } - rpc Suspend(WorkflowUpdateQuery) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Suspend(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ option (google.api.http) = { - put: "/api/v1/workflows/{workflow.metadata.name}" - body: "workflow" + put: "/api/v1/workflows/{Namespace}/{WorkflowName}" + body: "*" }; } - rpc Terminate(WorkflowUpdateQuery) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Terminate(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ option (google.api.http) = { - put: "/api/v1/workflows/{workflow.metadata.name}" - body: "workflow" + put: "/api/v1/workflows/{Namespace}/{WorkflowName}" + body: "*" }; } -// // PodLogs returns stream of log entries for the specified pod. Pod -// rpc PodLogs(WorkflowQuery) returns (stream LogEntry) { -// option (google.api.http).get = "/api/v1/workflow/{name}/pods/{podName}/logs"; -// } + // PodLogs returns stream of log entries for the specified pod. Pod + rpc PodLogs(WorkflowLogRequest) returns (stream LogEntry) { + option (google.api.http).get = "/api/v1/workflow/{Namespace}/{WorkflowName}/pods/{PodName}/logs/{Container}"; + } } \ No newline at end of file diff --git a/cmd/server/workflow/workflow.swagger.json b/cmd/server/workflow/workflow.swagger.json index 516e38762402..9c3378131396 100644 --- a/cmd/server/workflow/workflow.swagger.json +++ b/cmd/server/workflow/workflow.swagger.json @@ -16,49 +16,50 @@ "application/json" ], "paths": { - "/api/v1/workflows": { + "/api/v1/workflow/{Namespace}/{WorkflowName}/pods/{PodName}/logs/{Container}": { "get": { - "operationId": "List", + "summary": "PodLogs returns stream of log entries for the specified pod. 
Pod", + "operationId": "PodLogs", "responses": { "200": { - "description": "A successful response.", + "description": "A successful response.(streaming responses)", "schema": { - "$ref": "#/definitions/workflowWorkflowListResponse" + "$ref": "#/x-stream-definitions/workflowLogEntry" } } }, "parameters": [ { - "name": "name", - "in": "query", - "required": false, + "name": "Namespace", + "in": "path", + "required": true, "type": "string" }, { - "name": "Namespace", - "in": "query", - "required": false, + "name": "WorkflowName", + "in": "path", + "required": true, "type": "string" }, { - "name": "StartIdx", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" + "name": "PodName", + "in": "path", + "required": true, + "type": "string" }, { - "name": "PageSize", - "in": "query", - "required": false, - "type": "integer", - "format": "int32" + "name": "Container", + "in": "path", + "required": true, + "type": "string" } ], "tags": [ "WorkflowService" ] - }, + } + }, + "/api/v1/workflows": { "post": { "operationId": "Create", "responses": { @@ -75,7 +76,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/v1alpha1Workflow" + "$ref": "#/definitions/workflowWorkflowCreateRequest" } } ], @@ -84,43 +85,112 @@ ] } }, - "/api/v1/workflows/{name}": { + "/api/v1/workflows/{Namespace}": { "get": { - "operationId": "Get", + "operationId": "List", "responses": { "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/v1alpha1Workflow" + "$ref": "#/definitions/v1alpha1WorkflowList" } } }, "parameters": [ { - "name": "name", + "name": "Namespace", "in": "path", "required": true, "type": "string" }, { - "name": "Namespace", + "name": "ListOptions.labelSelector", + "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.", "in": "query", "required": false, "type": "string" }, { - "name": "StartIdx", + "name": "ListOptions.fieldSelector", + "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.", "in": "query", "required": false, - "type": "integer", - "format": "int32" + "type": "string" + }, + { + "name": "ListOptions.watch", + "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "ListOptions.resourceVersion", + "description": "When specified with a watch call, shows changes that occur after that particular version of a resource.\nDefaults to changes from the beginning of history.\nWhen specified for list:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.\n+optional.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "ListOptions.timeoutSeconds", + "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" }, { - "name": "PageSize", + "name": "ListOptions.limit", + "description": "limit is a maximum number of responses to return for a list call. 
If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.", "in": "query", "required": false, - "type": "integer", - "format": "int32" + "type": "string", + "format": "int64" + }, + { + "name": "ListOptions.continue", + "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", + "in": "query", + "required": false, + "type": "string" + } + ], + "tags": [ + "WorkflowService" + ] + } + }, + "/api/v1/workflows/{Namespace}/{WorkflowName}": { + "get": { + "operationId": "Get", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Workflow" + } + } + }, + "parameters": [ + { + "name": "Namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "WorkflowName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "GetOptions.resourceVersion", + "description": "When specified:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.", + "in": "query", + "required": false, + "type": "string" } ], "tags": [ @@ -133,44 +203,76 @@ "200": { "description": "A successful response.", "schema": { - "$ref": "#/definitions/workflowWorkflowResponse" + "$ref": "#/definitions/workflowWorkflowDeleteResponse" } } }, "parameters": [ { - "name": "name", + "name": "Namespace", "in": "path", "required": true, "type": "string" }, { - "name": "Namespace", + "name": "WorkflowName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "DeleteOptions.gracePeriodSeconds", + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. zero means delete immediately.\n+optional.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "DeleteOptions.preconditions.uid", + "description": "Specifies the target UID.\n+optional.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "DeleteOptions.preconditions.resourceVersion", + "description": "Specifies the target ResourceVersion\n+optional.", "in": "query", "required": false, "type": "string" }, { - "name": "StartIdx", + "name": "DeleteOptions.orphanDependents", + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. 
If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional.", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "DeleteOptions.propagationPolicy", + "description": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional.", "in": "query", "required": false, - "type": "integer", - "format": "int32" + "type": "string" }, { - "name": "PageSize", + "name": "DeleteOptions.dryRun", + "description": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional.", "in": "query", "required": false, - "type": "integer", - "format": "int32" + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi" } ], "tags": [ "WorkflowService" ] - } - }, - "/api/v1/workflows/{workflow.metadata.name}": { + }, "put": { "operationId": "Terminate", "responses": { @@ -183,8 +285,13 @@ }, "parameters": [ { - "name": "workflow.metadata.name", - "description": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n+optional", + "name": "Namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "WorkflowName", "in": "path", "required": true, "type": "string" @@ -194,7 +301,7 @@ "in": "body", "required": true, "schema": { - "$ref": "#/definitions/v1alpha1Workflow" + "$ref": "#/definitions/workflowWorkflowUpdateRequest" } } ], @@ -205,6 +312,20 @@ } }, "definitions": { + "apismetav1Preconditions": { + "type": "object", + "properties": { + "uid": { + "type": "string", + "title": "Specifies the target UID.\n+optional" + }, + "resourceVersion": { + "type": "string", + "title": "Specifies the target ResourceVersion\n+optional" + } + }, + "description": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out." + }, "intstrIntOrString": { "type": "object", "properties": { @@ -223,6 +344,18 @@ "description": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true", "title": "IntOrString is a type that can hold an int32 or a string. When used in\nJSON or YAML marshalling and unmarshalling, it produces or consumes the\ninner type. 
This allows you to have, for example, a JSON field that can\naccept a name or number.\nTODO: Rename to Int32OrString" }, + "protobufAny": { + "type": "object", + "properties": { + "type_url": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, "resourceQuantity": { "type": "object", "properties": { @@ -232,6 +365,31 @@ }, "description": "Quantity is a fixed-point representation of a number.\nIt provides convenient marshaling/unmarshaling in JSON and YAML,\nin addition to String() and Int64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9\n\u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e\n\u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e\n\u003csign\u003e ::= \"+\" | \"-\"\n\u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e\n\u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e\n\u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent\na number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal\nplaces. Numbers larger or more precise will be capped or rounded up.\n(E.g.: 0.1m will rounded up to 1m.)\nThis may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix\nit had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\".\nThis means that Exponent/suffix will be adjusted up or down (with a\ncorresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a\nfloating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed,\nbut will be re-emitted in their canonical form. 
(So always use canonical\nform, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without\nwriting some sort of special handling code in the hopes that that will\ncause implementors to also use a fixed point implementation.\n\n+protobuf=true\n+protobuf.embed=string\n+protobuf.options.marshal=false\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:deepcopy-gen=true\n+k8s:openapi-gen=true" }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpc_code": { + "type": "integer", + "format": "int32" + }, + "http_code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "http_status": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + }, "v1AWSElasticBlockStoreVolumeSource": { "type": "object", "properties": { @@ -654,6 +812,54 @@ }, "description": "ContainerPort represents a network port in a single container." }, + "v1CreateOptions": { + "type": "object", + "properties": { + "dryRun": { + "type": "array", + "items": { + "type": "string" + }, + "title": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional" + }, + "fieldManager": { + "type": "string", + "title": "fieldManager is a name associated with the actor or entity\nthat is making these changes. The value must be less than or\n128 characters long, and only contain printable characters,\nas defined by https://golang.org/pkg/unicode/#IsPrint.\n+optional" + } + }, + "description": "CreateOptions may be provided when creating an API object." + }, + "v1DeleteOptions": { + "type": "object", + "properties": { + "gracePeriodSeconds": { + "type": "string", + "format": "int64", + "title": "The duration in seconds before the object should be deleted. Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. zero means delete immediately.\n+optional" + }, + "preconditions": { + "$ref": "#/definitions/apismetav1Preconditions", + "title": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be\nreturned.\n+optional" + }, + "orphanDependents": { + "type": "boolean", + "format": "boolean", + "title": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. 
If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional" + }, + "propagationPolicy": { + "type": "string", + "title": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional" + }, + "dryRun": { + "type": "array", + "items": { + "type": "string" + }, + "title": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional" + } + }, + "description": "DeleteOptions may be provided when deleting an API object." + }, "v1DownwardAPIProjection": { "type": "object", "properties": { @@ -908,6 +1114,16 @@ }, "description": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must\nalso be in the same GCE project and zone as the kubelet. A GCE PD\ncan only be mounted as read/write once or read-only many times. GCE\nPDs support ownership management and SELinux relabeling." }, + "v1GetOptions": { + "type": "object", + "properties": { + "resourceVersion": { + "type": "string", + "description": "When specified:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv." + } + }, + "description": "GetOptions is the standard query options to the standard REST get call." + }, "v1GitRepoVolumeSource": { "type": "object", "properties": { @@ -1213,6 +1429,43 @@ }, "description": "ListMeta describes metadata that synthetic resources must have, including lists and\nvarious status objects. A resource may have only one of {ObjectMeta, ListMeta}." }, + "v1ListOptions": { + "type": "object", + "properties": { + "labelSelector": { + "type": "string", + "title": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional" + }, + "fieldSelector": { + "type": "string", + "title": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional" + }, + "watch": { + "type": "boolean", + "format": "boolean", + "title": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. 
Specify resourceVersion.\n+optional" + }, + "resourceVersion": { + "type": "string", + "title": "When specified with a watch call, shows changes that occur after that particular version of a resource.\nDefaults to changes from the beginning of history.\nWhen specified for list:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.\n+optional" + }, + "timeoutSeconds": { + "type": "string", + "format": "int64", + "title": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional" + }, + "limit": { + "type": "string", + "format": "int64", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned." + }, + "continue": { + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications." 
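The "limit"/"continue" contract described above pairs a page size with an opaque server-issued token. A minimal Go sketch of paging through workflows with the generated Argo clientset follows; the kubeconfig location, namespace, and page size are illustrative assumptions, not values taken from this patch:

package main

import (
	"fmt"
	"log"

	wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed kubeconfig location; any rest.Config source works here.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	wfClient, err := wfclientset.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	opts := metav1.ListOptions{Limit: 50} // arbitrary page size
	for {
		list, err := wfClient.ArgoprojV1alpha1().Workflows("default").List(opts)
		if err != nil {
			// A 410 Gone here means the continue token expired; restart the list.
			log.Fatal(err)
		}
		for _, wf := range list.Items {
			fmt.Println(wf.Name)
		}
		if list.Continue == "" {
			break // an empty continue token signals the last page
		}
		opts.Continue = list.Continue // resume from the server-provided token
	}
}

Note that, per the description above, the server may return fewer than Limit items on any page; only the empty continue token indicates that no more results are available.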
+ } + }, + "description": "ListOptions is the query options to a standard REST list call." + }, "v1LocalObjectReference": { "type": "object", "properties": { @@ -3513,6 +3766,21 @@ }, "title": "Workflow is the definition of a workflow resource\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object" }, + "v1alpha1WorkflowList": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/definitions/v1ListMeta" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Workflow" + } + } + }, + "title": "WorkflowList is list of Workflow resources\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object" + }, "v1alpha1WorkflowSpec": { "type": "object", "properties": { @@ -3740,27 +4008,38 @@ }, "title": "WorkflowStep is a reference to a template to execute in a series of step" }, - "workflowWorkflowListResponse": { + "workflowLogEntry": { "type": "object", "properties": { - "workflows": { - "type": "array", - "items": { - "$ref": "#/definitions/v1alpha1Workflow" - } + "content": { + "type": "string" + }, + "timeStamp": { + "$ref": "#/definitions/v1Time" } } }, - "workflowWorkflowResponse": { + "workflowWorkflowCreateRequest": { "type": "object", "properties": { - "Name": { - "type": "string" + "Workflows": { + "$ref": "#/definitions/v1alpha1Workflow" }, - "Status": { + "CreateOptions": { + "$ref": "#/definitions/v1CreateOptions" + } + } + }, + "workflowWorkflowDeleteResponse": { + "type": "object" + }, + "workflowWorkflowUpdateRequest": { + "type": "object", + "properties": { + "WorkflowName": { "type": "string" }, - "Message": { + "Namespace": { "type": "string" } } @@ -3846,5 +4125,19 @@ }, "title": "NodeStatus contains status information about an individual node in the workflow\n+k8s:openapi-gen=false" } + }, + "x-stream-definitions": { + "workflowLogEntry": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/workflowLogEntry" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of workflowLogEntry" + } } } diff --git a/cmd/server/workflow/workflow_service.go b/cmd/server/workflow/workflow_service.go index fcaa6dbd1d51..df17c262d6ca 100644 --- a/cmd/server/workflow/workflow_service.go +++ b/cmd/server/workflow/workflow_service.go @@ -2,170 +2,199 @@ package workflow import ( "encoding/json" + "errors" "fmt" "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo/pkg/client/clientset/versioned" wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" "github.com/argoproj/argo/workflow/util" + log "github.com/sirupsen/logrus" "golang.org/x/net/context" "google.golang.org/grpc/metadata" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" ) type Server struct { Namespace string - Clientset versioned.Clientset + WfClientset *versioned.Clientset + KubeClientset *kubernetes.Clientset EnableClientAuth bool } -func NewServer(Namespace string, clientset versioned.Clientset, enableClientAuth bool) WorkflowServiceServer { - return &Server{Namespace: Namespace, Clientset: clientset, EnableClientAuth: enableClientAuth} +func NewServer(Namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset, enableClientAuth bool) WorkflowServiceServer { + return &Server{Namespace: Namespace, WfClientset: wfClientset, KubeClientset: kubeClientSet, EnableClientAuth: enableClientAuth} } -func (s *Server) GetClientSet(md metadata.MD) 
(*versioned.Clientset, error) {
+func (s *Server) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubernetes.Clientset, error) {
+
+	md, _ := metadata.FromIncomingContext(ctx)
 	if s.EnableClientAuth {
-		return &s.Clientset, nil
+		return s.WfClientset, s.KubeClientset, nil
 	}
 
 	var restConfigStr, bearerToken string
-
+	if len(md.Get(CLIENT_REST_CONFIG)) == 0 {
+		return nil, nil, errors.New("client kubeconfig is not found")
+	}
 	restConfigStr = md.Get(CLIENT_REST_CONFIG)[0]
-	bearerToken = md.Get(AUTH_TOKEN)[0]
+	if len(md.Get(AUTH_TOKEN)) > 0 {
+		bearerToken = md.Get(AUTH_TOKEN)[0]
+	}
 
 	restConfig := rest.Config{}
+
 	err := json.Unmarshal([]byte(restConfigStr), &restConfig)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	restConfig.BearerToken = string(bearerToken)
 
-	//restConfig :=rest.Config{
-	//	// TODO: switch to using cluster DNS.
-	//	Host:            host,
-	//	TLSClientConfig: tlsClientConfig,
-	//	BearerToken:     string(bearerToken),
-	//
-	// }
-	fmt.Println(restConfigStr)
 
 	// create the workflow clientset
-	clientset, err := wfclientset.NewForConfig(&restConfig)
+	wfClientset, err := wfclientset.NewForConfig(&restConfig)
+	if err != nil {
+		log.Warnf("Failed to create WfClientset. ClientConfig: %+v, Error: %s", restConfig, err)
+		return nil, nil, err
+	}
+
+	// create the kube clientset
+	clientset, err := kubernetes.NewForConfig(&restConfig)
 	if err != nil {
-		return nil, err
+		log.Warnf("Failed to create KubeClientset. ClientConfig: %+v, Error: %s", restConfig, err)
+		return nil, nil, err
 	}
-	return clientset, nil
+	return wfClientset, clientset, nil
 }
 
 func (s *Server) Create(ctx context.Context, in *v1alpha1.Workflow) (*v1alpha1.Workflow, error) {
-	md, _ := metadata.FromIncomingContext(ctx)
-	clientset, err := s.GetClientSet(md)
-
-	if clientset == nil {
-		return nil, nil
+	wfClient, _, err := s.GetWFClient(ctx)
+	if err != nil {
+		return nil, err
 	}
+
 	namespace := s.Namespace
 	if in.Namespace != "" {
 		namespace = in.Namespace
 	}
 
-	wf, err := s.Clientset.ArgoprojV1alpha1().Workflows(namespace).Create(in)
+	wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Create(in)
 	if err != nil {
-		fmt.Println(err)
+		log.Warnf("Create request failed. Error: %s", err)
 		return nil, err
 	}
-
+	log.Infof("Workflow created successfully. 
Name: %s", wf.Name) return wf, nil } -func (s *Server) Get(ctx context.Context, in *WorkflowQuery) (*v1alpha1.Workflow, error) { +func (s *Server) Get(ctx context.Context, in *WorkflowGetRequest) (*v1alpha1.Workflow, error) { namespace := s.Namespace if in.Namespace != "" { namespace = in.Namespace } - md, _ := metadata.FromIncomingContext(ctx) - - clientset, err := s.GetClientSet(md) - - if clientset == nil { - return nil, nil + wfClient, _, err := s.GetWFClient(ctx) + if err != nil { + return nil, err } - wf, err := clientset.ArgoprojV1alpha1().Workflows(namespace).Get(in.Name, v1.GetOptions{}) + + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.Wfname, v1.GetOptions{}) if err != nil { - fmt.Println(err) return nil, err } return wf, err } -func (s *Server) List(ctx context.Context, in *WorkflowQuery) (*WorkflowListResponse, error) { +func (s *Server) List(ctx context.Context, in *WorkflowListRequest) (*v1alpha1.WorkflowList, error) { namespace := s.Namespace if in.Namespace != "" { namespace = in.Namespace } + wfClient, _, err := s.GetWFClient(ctx) - wfList, err := s.Clientset.ArgoprojV1alpha1().Workflows(namespace).List(v1.ListOptions{}) if err != nil { - fmt.Println(err) + return nil, err } - //fmt.Println(wfList) - var wfListItem []*v1alpha1.Workflow - for idx, _ := range wfList.Items { - wfListItem = append(wfListItem, &wfList.Items[idx]) + wfList, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).List(v1.ListOptions{}) + if err != nil { + fmt.Println(err) } - var wfListRsp = WorkflowListResponse{} - wfListRsp.Workflows = wfListItem - fmt.Println(wfListRsp) - return &wfListRsp, nil + + return wfList, nil } -func (s *Server) Delete(ctx context.Context, in *WorkflowQuery) (*WorkflowResponse, error) { - return nil, nil + +func (s *Server) Delete(ctx context.Context, in *WorkflowDeleteRequest) (*WorkflowDeleteResponse, error) { + namespace := s.Namespace + + if in.Namespace != "" { + namespace = in.Namespace + } + wfClient, _, err := s.GetWFClient(ctx) + + if err != nil { + return nil, err + } + err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Delete(in.WorkflowName, &v1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + return nil, err + } + + return fmt.Sprint("Workflow '%s' deleted\n", in.WorkflowName), nil } -func (s *Server) Retry(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) { - //namespace := s.Namespace - //if in.Workflow.Namespace != "" { - // namespace = in.Workflow.Namespace - //} - //kubeClient := commonutil.InitKubeClient() - // - ////wf, err := util.RetryWorkflow(kubeClient., s.Clientset.ArgoprojV1alpha1().Workflows(namespace),in.Workflow) - // - //if err != nil { - // fmt.Println(err) - // return nil, err - //} - // - //return wf, err - return nil, nil +func (s *Server) Retry(ctx context.Context, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + namespace := s.Namespace + if in.Namespace != "" { + namespace = in.Namespace + } + + wfClient, kubeClient, err := s.GetWFClient(ctx) + + if err != nil { + return nil, err + } + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.Wfname, v1.GetOptions{}) + + if err != nil { + return nil, err + } + + wf, err = util.RetryWorkflow(kubeClient, wfClient.ArgoprojV1alpha1().Workflows(namespace), wf) + + if err != nil { + return nil, err + } + return wf, err } func (s *Server) Resubmit(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) { namespace := s.Namespace - if in.Workflow.Namespace != "" { - namespace = in.Workflow.Namespace 
+	if in.Namespace != "" {
+		namespace = in.Namespace
 	}
 
-	var wfClientset *versioned.Clientset
+	wfClient, _, err := s.GetWFClient(ctx)
+
+	if err != nil {
+		return nil, err
+	}
+
+	wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.Workflow.Name, v1.GetOptions{})
 
-	wf, err := s.Clientset.ArgoprojV1alpha1().Workflows(namespace).Get(in.Workflow.Name, v1.GetOptions{})
-	//errors.CheckError(err)
+	if err != nil {
+		return nil, err
+	}
 
 	newWF, err := util.FormulateResubmitWorkflow(wf, in.Memoized)
-	//errors.CheckError(err)
-	created, err := util.SubmitWorkflow(s.Clientset.ArgoprojV1alpha1().Workflows(namespace), wfClientset, namespace, newWF, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	created, err := util.SubmitWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfClient, namespace, newWF, nil)
 
 	if err != nil {
 		fmt.Println(err)
@@ -176,13 +205,65 @@ func (s *Server) Resubmit(ctx context.Context, in *WorkflowUpdateQuery) (*v1alph
 }
 
 func (s *Server) Resume(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) {
-	return nil, nil
+	namespace := s.Namespace
+	if in.Namespace != "" {
+		namespace = in.Namespace
+	}
+	wfClient, _, err := s.GetWFClient(ctx)
+
+	if err != nil {
+		return nil, err
+	}
+
+	err = util.ResumeWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.Wfname)
+	if err != nil {
+		log.Warnf("Failed to resume %s: %+v", in.Wfname, err)
+		return nil, err
+	}
+
+	wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.Wfname, v1.GetOptions{})
+
+	if err != nil {
+		return nil, err
+	}
+	return wf, nil
 }
 
 func (s *Server) Suspend(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) {
-	return nil, nil
+	namespace := s.Namespace
+	if in.Namespace != "" {
+		namespace = in.Namespace
+	}
+
+	wfClient, _, err := s.GetWFClient(ctx)
+
+	if err != nil {
+		return nil, err
+	}
+	err = util.SuspendWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.Wfname)
+	if err != nil {
+		log.Warnf("Failed to suspend %s: %+v", in.Wfname, err)
+		return nil, err
+	}
+
+	wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.Wfname, v1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+	return wf, nil
 }
 
 func (s *Server) Terminate(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) {
-	return nil, nil
+	namespace := s.Namespace
+	if in.Namespace != "" {
+		namespace = in.Namespace
+	}
+	wfClient, _, err := s.GetWFClient(ctx)
+
+	if err != nil {
+		return nil, err
+	}
+	err = util.TerminateWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.Wfname)
+	if err != nil {
+		log.Warnf("Failed to terminate %s: %+v", in.Wfname, err)
+		return nil, err
+	}
+
+	wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.Wfname, v1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+	return wf, nil
 }
diff --git a/hack/generate-proto.sh b/hack/generate-proto.sh
index 6ba6c6b97b46..b314ebc34931 100755
--- a/hack/generate-proto.sh
+++ b/hack/generate-proto.sh
@@ -107,7 +107,7 @@ EOF
 
     /bin/rm -f "${SWAGGER_OUT}"
 
-    /usr/bin/find "cmd/${SWAGGER_ROOT}" -name '*.swagger.json' -exec /usr/local/bin/swagger mixin -c "${EXPECTED_COLLISIONS}" "${PRIMARY_SWAGGER}" '{}' \+ > "${COMBINED_SWAGGER}"
+#    /usr/bin/find "cmd/${SWAGGER_ROOT}" -name '*.swagger.json' -exec /usr/local/bin/swagger mixin -c "${EXPECTED_COLLISIONS}" "${PRIMARY_SWAGGER}" '{}' \+ > "${COMBINED_SWAGGER}"
 
     /usr/local/bin/jq -r 'del(.definitions[].properties[]? | select(."$ref"!=null and .description!=null).description) | del(.definitions[].properties[]? 
| select(."$ref"!=null and .title!=null).title)' "${COMBINED_SWAGGER}" > "${SWAGGER_OUT}" /bin/rm "${PRIMARY_SWAGGER}" "${COMBINED_SWAGGER}" @@ -116,7 +116,7 @@ EOF # clean up generated swagger files (should come after collect_swagger) clean_swagger() { SWAGGER_ROOT="$1" - /usr/bin/find "${SWAGGER_ROOT}" -name '*.swagger.json' -delete +# /usr/bin/find "${SWAGGER_ROOT}" -name '*.swagger.json' -delete } collect_swagger server 21 diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index aac010ed2d7b..a96d0cfedd0d 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -23,6 +23,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ContinueOn": schema_pkg_apis_workflow_v1alpha1_ContinueOn(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTask": schema_pkg_apis_workflow_v1alpha1_DAGTask(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTemplate": schema_pkg_apis_workflow_v1alpha1_DAGTemplate(ref), + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ExecutorConfig": schema_pkg_apis_workflow_v1alpha1_ExecutorConfig(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.GitArtifact": schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HDFSArtifact": schema_pkg_apis_workflow_v1alpha1_HDFSArtifact(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.HDFSConfig": schema_pkg_apis_workflow_v1alpha1_HDFSConfig(ref), @@ -32,6 +33,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Item": schema_pkg_apis_workflow_v1alpha1_Item(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ItemValue": schema_pkg_apis_workflow_v1alpha1_ItemValue(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Metadata": schema_pkg_apis_workflow_v1alpha1_Metadata(ref), + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.NodeStatus": schema_pkg_apis_workflow_v1alpha1_NodeStatus(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.NoneStrategy": schema_pkg_apis_workflow_v1alpha1_NoneStrategy(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs": schema_pkg_apis_workflow_v1alpha1_Outputs(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ParallelSteps": schema_pkg_apis_workflow_v1alpha1_ParallelSteps(ref), @@ -53,6 +55,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Workflow": schema_pkg_apis_workflow_v1alpha1_Workflow(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowList": schema_pkg_apis_workflow_v1alpha1_WorkflowList(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowSpec": schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref), + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowStatus": schema_pkg_apis_workflow_v1alpha1_WorkflowStatus(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowStep": schema_pkg_apis_workflow_v1alpha1_WorkflowStep(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowTemplate": schema_pkg_apis_workflow_v1alpha1_WorkflowTemplate(ref), "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.WorkflowTemplateList": schema_pkg_apis_workflow_v1alpha1_WorkflowTemplateList(ref), @@ -93,6 +96,12 @@ func 
schema_pkg_apis_workflow_v1alpha1_Arguments(ref common.ReferenceCallback) c Type: []string{"object"}, Properties: map[string]spec.Schema{ "parameters": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Parameters is the list of parameters to pass to the template or workflow", Type: []string{"array"}, @@ -106,6 +115,12 @@ func schema_pkg_apis_workflow_v1alpha1_Arguments(ref common.ReferenceCallback) c }, }, "artifacts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Artifacts is the list of artifacts to pass to the template or workflow", Type: []string{"array"}, @@ -512,6 +527,12 @@ func schema_pkg_apis_workflow_v1alpha1_DAGTemplate(ref common.ReferenceCallback) }, }, "tasks": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Tasks are a list of DAG tasks", Type: []string{"array"}, @@ -540,6 +561,26 @@ func schema_pkg_apis_workflow_v1alpha1_DAGTemplate(ref common.ReferenceCallback) } } +func schema_pkg_apis_workflow_v1alpha1_ExecutorConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ExecutorConfig holds configurations of an executor container.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "serviceAccountName": { + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccountName specifies the service account name of the executor container.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_workflow_v1alpha1_GitArtifact(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -865,6 +906,12 @@ func schema_pkg_apis_workflow_v1alpha1_Inputs(ref common.ReferenceCallback) comm Type: []string{"object"}, Properties: map[string]spec.Schema{ "parameters": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Parameters are a list of parameters passed as inputs", Type: []string{"array"}, @@ -878,6 +925,12 @@ func schema_pkg_apis_workflow_v1alpha1_Inputs(ref common.ReferenceCallback) comm }, }, "artifacts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Artifact are a list of artifacts passed as inputs", Type: []string{"array"}, @@ -1015,6 +1068,164 @@ func schema_pkg_apis_workflow_v1alpha1_Metadata(ref common.ReferenceCallback) co } } +func schema_pkg_apis_workflow_v1alpha1_NodeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeStatus contains status information about an individual node in the workflow", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "id": { + SchemaProps: spec.SchemaProps{ + 
Description: "ID is a unique identifier of a node within the worklow It is implemented as a hash of the node name, which makes the ID deterministic", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is unique name in the node tree used to generate the node ID", + Type: []string{"string"}, + Format: "", + }, + }, + "displayName": { + SchemaProps: spec.SchemaProps{ + Description: "DisplayName is a human readable representation of the node. Unique within a template boundary", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type indicates type of node", + Type: []string{"string"}, + Format: "", + }, + }, + "templateName": { + SchemaProps: spec.SchemaProps{ + Description: "TemplateName is the template name which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)", + Type: []string{"string"}, + Format: "", + }, + }, + "templateRef": { + SchemaProps: spec.SchemaProps{ + Description: "TemplateRef is the reference to the template resource which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.TemplateRef"), + }, + }, + "storedTemplateID": { + SchemaProps: spec.SchemaProps{ + Description: "StoredTemplateID is the ID of stored template.", + Type: []string{"string"}, + Format: "", + }, + }, + "workflowTemplateName": { + SchemaProps: spec.SchemaProps{ + Description: "WorkflowTemplateName is the WorkflowTemplate resource name on which the resolved template of this node is retrieved.", + Type: []string{"string"}, + Format: "", + }, + }, + "phase": { + SchemaProps: spec.SchemaProps{ + Description: "Phase a simple, high-level summary of where the node is in its lifecycle. 
Can be used as a state machine.", + Type: []string{"string"}, + Format: "", + }, + }, + "boundaryID": { + SchemaProps: spec.SchemaProps{ + Description: "BoundaryID indicates the node ID of the associated template root node in which this node belongs to", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about why the node is in this condition.", + Type: []string{"string"}, + Format: "", + }, + }, + "startedAt": { + SchemaProps: spec.SchemaProps{ + Description: "Time at which this node started", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "finishedAt": { + SchemaProps: spec.SchemaProps{ + Description: "Time at which this node completed", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "podIP": { + SchemaProps: spec.SchemaProps{ + Description: "PodIP captures the IP of the pod for daemoned steps", + Type: []string{"string"}, + Format: "", + }, + }, + "daemoned": { + SchemaProps: spec.SchemaProps{ + Description: "Daemoned tracks whether or not this node was daemoned and need to be terminated", + Type: []string{"boolean"}, + Format: "", + }, + }, + "inputs": { + SchemaProps: spec.SchemaProps{ + Description: "Inputs captures input parameter values and artifact locations supplied to this template invocation", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Inputs"), + }, + }, + "outputs": { + SchemaProps: spec.SchemaProps{ + Description: "Outputs captures output parameter values and artifact locations produced by this template invocation", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs"), + }, + }, + "children": { + SchemaProps: spec.SchemaProps{ + Description: "Children is a list of child node IDs", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "outboundNodes": { + SchemaProps: spec.SchemaProps{ + Description: "OutboundNodes tracks the node IDs which are considered \"outbound\" nodes to a template invocation. For every invocation of a template, there are nodes which we considered as \"outbound\". Essentially, these are last nodes in the execution sequence to run, before the template is considered completed. These nodes are then connected as parents to a following step.\n\nIn the case of single pod steps (i.e. container, script, resource templates), this list will be nil since the pod itself is already considered the \"outbound\" node. In the case of DAGs, outbound nodes are the \"target\" tasks (tasks with no children). In the case of steps, outbound nodes are all the containers involved in the last step group. NOTE: since templates are composable, the list of outbound nodes are carried upwards when a DAG/steps template invokes another DAG/steps template. 
In other words, the outbound nodes of a template, will be a superset of the outbound nodes of its last children.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"id", "name", "displayName", "type"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Inputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.TemplateRef", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + func schema_pkg_apis_workflow_v1alpha1_NoneStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -1034,6 +1245,12 @@ func schema_pkg_apis_workflow_v1alpha1_Outputs(ref common.ReferenceCallback) com Type: []string{"object"}, Properties: map[string]spec.Schema{ "parameters": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Parameters holds the list of output parameters produced by a step", Type: []string{"array"}, @@ -1047,6 +1264,12 @@ func schema_pkg_apis_workflow_v1alpha1_Outputs(ref common.ReferenceCallback) com }, }, "artifacts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Artifacts holds the list of output artifacts produced by a step", Type: []string{"array"}, @@ -1314,6 +1537,13 @@ func schema_pkg_apis_workflow_v1alpha1_S3Artifact(ref common.ReferenceCallback) Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), }, }, + "roleARN": { + SchemaProps: spec.SchemaProps{ + Description: "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", + Type: []string{"string"}, + Format: "", + }, + }, "key": { SchemaProps: spec.SchemaProps{ Description: "Key is the key in the bucket where the artifact resides", @@ -1377,6 +1607,13 @@ func schema_pkg_apis_workflow_v1alpha1_S3Bucket(ref common.ReferenceCallback) co Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), }, }, + "roleARN": { + SchemaProps: spec.SchemaProps{ + Description: "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"endpoint", "bucket", "accessKeySecret", "secretKeySecret"}, }, @@ -1810,6 +2047,12 @@ func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) co }, }, "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Volumes is a list of volumes that can be mounted by containers in a template.", Type: []string{"array"}, @@ -1823,6 +2066,12 @@ func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) co }, }, "initContainers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "InitContainers is a list of containers which run before the main container.", Type: []string{"array"}, @@ -1836,6 +2085,12 @@ func 
schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) co }, }, "sidecars": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Sidecars is a list of containers which run alongside the main container Sidecars are automatically killed when the main container completes", Type: []string{"array"}, @@ -1875,6 +2130,12 @@ func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) co }, }, "tolerations": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "key", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Tolerations to apply to workflow pods.", Type: []string{"array"}, @@ -1915,7 +2176,26 @@ func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) co Format: "", }, }, + "automountServiceAccountToken": { + SchemaProps: spec.SchemaProps{ + Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. ServiceAccountName of ExecutorConfig must be specified if this value is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "executor": { + SchemaProps: spec.SchemaProps{ + Description: "Executor holds configurations of the executor container.", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ExecutorConfig"), + }, + }, "hostAliases": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "ip", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod spec", Type: []string{"array"}, @@ -1930,8 +2210,7 @@ func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) co }, "securityContext": { SchemaProps: spec.SchemaProps{ - Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. 
See type description for default values of each field.", - Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), + Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), }, }, "podSpecPatch": { @@ -1946,7 +2225,7 @@ func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) co }, }, Dependencies: []string{ - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactLocation", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Inputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Metadata", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ParallelSteps", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ResourceTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RetryStrategy", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ScriptTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.SuspendTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.TemplateRef", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.UserContainer", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactLocation", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.DAGTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ExecutorConfig", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Inputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Metadata", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ParallelSteps", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ResourceTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.RetryStrategy", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ScriptTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.SuspendTemplate", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.TemplateRef", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.UserContainer", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, } } @@ -2349,6 +2628,12 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback Type: []string{"object"}, Properties: map[string]spec.Schema{ "templates": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Templates is a list of workflow templates used in a workflow", Type: []string{"array"}, @@ -2381,7 +2666,26 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback Format: "", }, }, + "automountServiceAccountToken": { + SchemaProps: spec.SchemaProps{ + Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. 
ServiceAccountName of ExecutorConfig must be specified if this value is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "executor": { + SchemaProps: spec.SchemaProps{ + Description: "Executor holds configurations of executor containers of the workflow.", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ExecutorConfig"), + }, + }, "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Volumes is a list of volumes that can be mounted by containers in a workflow.", Type: []string{"array"}, @@ -2395,6 +2699,12 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback }, }, "volumeClaimTemplates": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "VolumeClaimTemplates is a list of claims that containers are allowed to reference. The Workflow controller will create the claims at the beginning of the workflow and delete the claims upon completion of the workflow", Type: []string{"array"}, @@ -2449,6 +2759,12 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback }, }, "tolerations": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "key", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "Tolerations to apply to workflow pods.", Type: []string{"array"}, @@ -2462,6 +2778,12 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback }, }, "imagePullSecrets": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ Description: "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. 
More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", Type: []string{"array"}, @@ -2550,9 +2872,14 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback }, }, "hostAliases": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "ip", + "x-kubernetes-patch-strategy": "merge", + }, + }, SchemaProps: spec.SchemaProps{ - Description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod spec", - Type: []string{"array"}, + Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -2580,7 +2907,102 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback }, }, Dependencies: []string{ - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRef", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.PodGC", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Template", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Arguments", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ArtifactRepositoryRef", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.ExecutorConfig", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.PodGC", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Template", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + } +} + +func schema_pkg_apis_workflow_v1alpha1_WorkflowStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkflowStatus contains overall status information about a workflow", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "phase": { + SchemaProps: spec.SchemaProps{ + Description: "Phase a simple, high-level summary of where the workflow is in its lifecycle.", + Type: []string{"string"}, + Format: "", + }, + }, + "startedAt": { + SchemaProps: spec.SchemaProps{ + Description: "Time at which this workflow started", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "finishedAt": { + SchemaProps: spec.SchemaProps{ + Description: "Time at which this workflow completed", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about why the workflow is in this condition.", + Type: []string{"string"}, + Format: "", + }, + }, + "compressedNodes": { + SchemaProps: spec.SchemaProps{ + Description: "Compressed and base64 decoded Nodes map", + Type: []string{"string"}, + Format: "", + }, + }, + "nodes": { + SchemaProps: spec.SchemaProps{ + Description: "Nodes is a mapping between a node ID and the node's status.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: 
spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.NodeStatus"), + }, + }, + }, + }, + }, + "storedTemplates": { + SchemaProps: spec.SchemaProps{ + Description: "StoredTemplates is a mapping between a template ref and the node's status.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Template"), + }, + }, + }, + }, + }, + "persistentVolumeClaims": { + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaims tracks all PVCs that were created as part of the workflow. The contents of this list are drained at the end of the workflow.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Volume"), + }, + }, + }, + }, + }, + "outputs": { + SchemaProps: spec.SchemaProps{ + Description: "Outputs captures output values and artifact locations produced by the workflow via global outputs", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.NodeStatus", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Outputs", "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.Template", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index d60335f8e837..7cf14f0b5b1f 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -234,7 +234,7 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont return nil, errors.Wrap(err, "", "Fail to marshal the Pod spec") } - tmpl.PodSpecPatch, err = util.PodSpecPatchMerge(woc.wf, tmpl) + tmpl.PodSpecPatch, err = util. 
PodSpecPatchMerge(woc.wf, tmpl) if err != nil { return nil, errors.Wrap(err, "", "Fail to marshal the Pod spec") From 7bc86b984be78e6b8145b33b2b6109c7f9eb3da3 Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian Date: Wed, 6 Nov 2019 14:50:58 -0800 Subject: [PATCH 007/421] 3rd Commit --- api/openapi-spec/swagger.json | 3 +- cmd/server/apiserver/argoserver.go | 50 ++- cmd/server/main.go | 2 + cmd/server/workflow/common.go | 4 +- cmd/server/workflow/workflow.pb.go | 201 ++++++++--- cmd/server/workflow/workflow.pb.gw.go | 18 +- cmd/server/workflow/workflow.proto | 8 +- cmd/server/workflow/workflow.swagger.json | 13 +- cmd/server/workflow/workflow_db_service.go | 81 +++++ cmd/server/workflow/workflow_server.go | 312 ++++++++++++++++++ cmd/server/workflow/workflow_service.go | 145 ++------ .../sqldb/mocks/DBRepository.go | 0 {workflow/persist => persist}/sqldb/sqldb.go | 0 .../sqldb/workflow_repository.go | 73 ++-- pkg/apis/workflow/v1alpha1/generated.proto | 7 +- .../workflow/v1alpha1/openapi_generated.go | 9 +- pkg/apis/workflow/v1alpha1/workflow_types.go | 37 --- workflow/controller/controller.go | 2 +- workflow/controller/operator_persist_test.go | 4 +- 19 files changed, 701 insertions(+), 268 deletions(-) create mode 100644 cmd/server/workflow/workflow_db_service.go create mode 100644 cmd/server/workflow/workflow_server.go rename {workflow/persist => persist}/sqldb/mocks/DBRepository.go (100%) rename {workflow/persist => persist}/sqldb/sqldb.go (100%) rename {workflow/persist => persist}/sqldb/workflow_repository.go (79%) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 631a266a49cf..5771c3de0b98 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -1170,6 +1170,7 @@ "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.ScriptTemplate" }, "securityContext": { + "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext" }, "serviceAccountName": { @@ -1537,7 +1538,6 @@ "format": "int64" }, "podGC": { - "description": "PodGC describes the strategy to use when to deleting completed pods", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.PodGC" }, "podPriority": { @@ -1563,7 +1563,6 @@ "type": "string" }, "securityContext": { - "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. 
See type description for default values of each field.", "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext" }, "serviceAccountName": { diff --git a/cmd/server/apiserver/argoserver.go b/cmd/server/apiserver/argoserver.go index f6e784577e8b..b64c4ee14eb8 100644 --- a/cmd/server/apiserver/argoserver.go +++ b/cmd/server/apiserver/argoserver.go @@ -2,16 +2,22 @@ package apiserver import ( "github.com/argoproj/argo/cmd/server/workflow" + "github.com/argoproj/argo/errors" "github.com/argoproj/argo/pkg/apiclient" "github.com/argoproj/argo/pkg/client/clientset/versioned" + "github.com/argoproj/argo/workflow/common" + "github.com/argoproj/argo/workflow/config" golang_proto "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime" "github.com/prometheus/common/log" "golang.org/x/net/context" "google.golang.org/grpc" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "net" "regexp" + "sigs.k8s.io/yaml" "fmt" "k8s.io/client-go/kubernetes" @@ -22,8 +28,10 @@ import ( type ArgoServer struct { Namespace string KubeClientset kubernetes.Clientset - wfClientSet *versioned.Clientset + WfClientSet *versioned.Clientset EnableClientAuth bool + Config *config.WorkflowControllerConfig + ConfigName string } type ArgoServerOpts struct { @@ -31,11 +39,13 @@ type ArgoServerOpts struct { Namespace string KubeClientset *versioned.Clientset EnableClientAuth bool + ConfigName string } func NewArgoServer(ctx context.Context, opts ArgoServerOpts) *ArgoServer { - return &ArgoServer{Namespace: opts.Namespace, wfClientSet: opts.KubeClientset, EnableClientAuth: opts.EnableClientAuth} + return &ArgoServer{Namespace: opts.Namespace, WfClientSet: opts.KubeClientset, + EnableClientAuth: opts.EnableClientAuth, ConfigName:opts.ConfigName} } var backoff = wait.Backoff{ @@ -67,17 +77,21 @@ func (as *ArgoServer) newGRPCServer() *grpc.Server { } grpcS := grpc.NewServer(sOpts...) - workflowService := workflow.NewServer(as.Namespace, *as.wfClientSet, as.EnableClientAuth) - workflow.RegisterWorkflowServiceServer(grpcS, workflowService) + configMap, err := as.RsyncConfig(as.Namespace, as.WfClientSet, &as.KubeClientset) + if err != nil { + panic("Error marshalling config map") + } + workflowServer := workflow.NewWorkflowServer(as.Namespace, as.WfClientSet, &as.KubeClientset, configMap, as.EnableClientAuth) + workflow.RegisterWorkflowServiceServer(grpcS, workflowServer) return grpcS } //// newHTTPServer returns the HTTP server to serve HTTP/HTTPS requests. This is implemented //// using grpc-gateway as a proxy to the gRPC server. 
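Since newGRPCServer above registers the WorkflowService on a plain gRPC server, a caller can exercise it over an ordinary gRPC connection. The sketch below is a hedged illustration, not code from this patch: NewWorkflowServiceClient is the protoc-generated client constructor assumed to exist in workflow.pb.go, and the address, API server host, and token are placeholders. As GetWFClient shows earlier in this patch, when EnableClientAuth is false the server builds per-request clientsets from a JSON-serialized rest.Config carried in request metadata under the CLIENT_REST_CONFIG key:

package main

import (
	"context"
	"encoding/json"
	"log"

	"github.com/argoproj/argo/cmd/server/workflow"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Placeholder address for a running argo server.
	conn, err := grpc.Dial("localhost:8082", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Serialize a minimal rest.Config equivalent; the server json.Unmarshals
	// this into rest.Config, so only matching field names matter here.
	kubeCfg, err := json.Marshal(map[string]string{
		"Host":        "https://127.0.0.1:6443", // placeholder API server
		"BearerToken": "<token>",                // placeholder credentials
	})
	if err != nil {
		log.Fatal(err)
	}
	ctx := metadata.AppendToOutgoingContext(context.Background(),
		workflow.CLIENT_REST_CONFIG, string(kubeCfg))

	// Assumed protoc-generated counterpart of WorkflowServiceServer.
	client := workflow.NewWorkflowServiceClient(conn)
	list, err := client.List(ctx, &workflow.WorkflowListRequest{Namespace: "default"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("server returned %d workflows", len(list.Items))
}
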
-//func (a *ArgoServer) newHTTPServer(ctx context.Context, port int, grpcWebHandler http.Handler) *http.Server { +//func (a *ArgoServer) newHTTPServer(ctx context.Context, port int, grpcWebHandler http.Handler) *http.KubeService { // endpoint := fmt.Sprintf("localhost:%d", port) // mux := http.NewServeMux() -// httpS := http.Server{ +// httpS := http.KubeService{ // Addr: endpoint, // Handler: &handlerSwitcher{ // handler: &bug21955Workaround{handler: mux}, @@ -174,3 +188,27 @@ func (a *ArgoServer) translateGrpcCookieHeader(ctx context.Context, w http.Respo return nil } + +// ResyncConfig reloads the controller config from the configmap +func (a *ArgoServer) RsyncConfig(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset)(*config.WorkflowControllerConfig, error){ + cmClient := kubeClientSet.CoreV1().ConfigMaps(namespace) + cm, err := cmClient.Get(a.ConfigName, metav1.GetOptions{}) + if err != nil { + return nil, errors.InternalWrapError(err) + } + return a.UpdateConfig(cm) +} + +func (a *ArgoServer) UpdateConfig(cm *apiv1.ConfigMap)(*config.WorkflowControllerConfig, error){ + configStr, ok := cm.Data[common.WorkflowControllerConfigMapKey] + if !ok { + return nil, errors.InternalErrorf("ConfigMap '%s' does not have key '%s'", a.ConfigName, common.WorkflowControllerConfigMapKey) + } + var config config.WorkflowControllerConfig + err := yaml.Unmarshal([]byte(configStr), &config) + if err != nil { + return nil, errors.InternalWrapError(err) + } + return &config, nil +} + diff --git a/cmd/server/main.go b/cmd/server/main.go index fb99340801d1..9442e7d91cda 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -31,6 +31,7 @@ func NewRootCommand() *cobra.Command { clientConfig clientcmd.ClientConfig logLevel string // --loglevel enableClientAuth string + configMap string ) var command = cobra.Command{ @@ -76,6 +77,7 @@ func NewRootCommand() *cobra.Command { clientConfig = kubecli.AddKubectlFlagsToCmd(&command) command.AddCommand(cmdutil.NewVersionCmd(CLIName)) command.Flags().StringVar(&enableClientAuth, "enableClientAuth", "false", "") + command.Flags().StringVar(&configMap, "configmap", "workflow-controller-configmap", "Name of K8s configmap to retrieve workflow controller configuration") command.Flags().StringVar(&logLevel, "loglevel", "debug", "Set the logging level. One of: debug|info|warn|error") return &command } diff --git a/cmd/server/workflow/common.go b/cmd/server/workflow/common.go index 61c0f51a6dcc..bafb56a0b47f 100644 --- a/cmd/server/workflow/common.go +++ b/cmd/server/workflow/common.go @@ -25,11 +25,11 @@ type ClientConfig struct { // sent to the server. rest.ContentConfig - // Server requires Basic authentication + // KubeService requires Basic authentication Username string Password string - // Server requires Bearer authentication. This client will not attempt to use + // KubeService requires Bearer authentication. This client will not attempt to use // refresh tokens for an OAuth2 flow. // TODO: demonstrate an OAuth2 compatible client. 
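	// Editorial note: on the server side, GetWFClient reconstructs a
	// rest.Config from the CLIENT_REST_CONFIG gRPC metadata entry and, when an
	// AUTH_TOKEN entry is present, overwrites rest.Config.BearerToken with it
	// before building the workflow and Kubernetes clientsets.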
BearerToken string diff --git a/cmd/server/workflow/workflow.pb.go b/cmd/server/workflow/workflow.pb.go index cbb8371e1b9e..da6cade80623 100644 --- a/cmd/server/workflow/workflow.pb.go +++ b/cmd/server/workflow/workflow.pb.go @@ -35,8 +35,9 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type WorkflowCreateRequest struct { - Workflows *v1alpha1.Workflow `protobuf:"bytes,1,opt,name=Workflows,proto3" json:"Workflows,omitempty"` - CreateOptions *v1.CreateOptions `protobuf:"bytes,2,opt,name=CreateOptions,proto3" json:"CreateOptions,omitempty"` + Namespace string `protobuf:"bytes,1,opt,name=Namespace,proto3" json:"Namespace,omitempty"` + Workflows *v1alpha1.Workflow `protobuf:"bytes,2,opt,name=Workflows,proto3" json:"Workflows,omitempty"` + CreateOptions *v1.CreateOptions `protobuf:"bytes,3,opt,name=CreateOptions,proto3" json:"CreateOptions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -75,6 +76,13 @@ func (m *WorkflowCreateRequest) XXX_DiscardUnknown() { var xxx_messageInfo_WorkflowCreateRequest proto.InternalMessageInfo +func (m *WorkflowCreateRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + func (m *WorkflowCreateRequest) GetWorkflows() *v1alpha1.Workflow { if m != nil { return m.Workflows @@ -210,6 +218,7 @@ func (m *WorkflowListRequest) GetListOptions() *v1.ListOptions { type WorkflowUpdateRequest struct { WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` + Memoized bool `protobuf:"varint,3,opt,name=Memoized,proto3" json:"Memoized,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -262,6 +271,13 @@ func (m *WorkflowUpdateRequest) GetNamespace() string { return "" } +func (m *WorkflowUpdateRequest) GetMemoized() bool { + if m != nil { + return m.Memoized + } + return false +} + type WorkflowLogRequest struct { WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` @@ -504,56 +520,57 @@ func init() { func init() { proto.RegisterFile("cmd/server/workflow/workflow.proto", fileDescriptor_192bc67c39cca05a) } var fileDescriptor_192bc67c39cca05a = []byte{ - // 783 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x96, 0xcf, 0x6b, 0x13, 0x41, - 0x14, 0xc7, 0x99, 0xb6, 0xa6, 0xcd, 0xb4, 0x22, 0x8e, 0x5a, 0x42, 0x48, 0x7f, 0xb0, 0x20, 0x48, - 0x29, 0x3b, 0x4d, 0x5b, 0x50, 0x0a, 0x1e, 0xb4, 0x4a, 0x05, 0xa3, 0x96, 0x4d, 0x45, 0xaa, 0xa7, - 0x6d, 0xf2, 0xdc, 0xae, 0xc9, 0xee, 0xac, 0x3b, 0x93, 0x94, 0x12, 0x72, 0xd0, 0x83, 0x78, 0x52, - 0xd0, 0x8b, 0xde, 0xfc, 0x75, 0xf0, 0x0f, 0xf0, 0x8f, 0xf0, 0xa6, 0xe0, 0x3f, 0x20, 0xc5, 0x3f, - 0x44, 0x66, 0xb2, 0x3f, 0x9b, 0xb4, 0xa4, 0xa4, 0x87, 0x9c, 0x76, 0x76, 0xde, 0xbe, 0xf7, 0x3e, - 0xfb, 0xe6, 0x3b, 0x6f, 0x06, 0x6b, 0x15, 0xa7, 0x4a, 0x39, 0xf8, 0x4d, 0xf0, 0xe9, 0x1e, 0xf3, - 0x6b, 0x4f, 0xeb, 0x6c, 0x2f, 0x1a, 0xe8, 0x9e, 0xcf, 0x04, 0x23, 0x13, 0xe1, 0x7b, 0xfe, 0xa2, - 0xc5, 0x2c, 0xa6, 0x26, 0xa9, 0x1c, 0x75, 0xec, 0xf9, 0x82, 0xc5, 0x98, 0x55, 0x07, 0x6a, 0x7a, - 0x36, 0x35, 0x5d, 0x97, 0x09, 0x53, 0xd8, 0xcc, 0xe5, 0x81, 0x75, 0xb5, 0x76, 0x8d, 0xeb, 0x36, - 0x93, 0x56, 0xc7, 0xac, 
0xec, 0xda, 0x2e, 0xf8, 0xfb, 0xd4, 0xab, 0x59, 0x72, 0x82, 0x53, 0x07, - 0x84, 0x49, 0x9b, 0x45, 0x6a, 0x81, 0x0b, 0xbe, 0x29, 0xa0, 0x1a, 0x78, 0xad, 0x5b, 0xb6, 0xd8, - 0x6d, 0xec, 0xe8, 0x15, 0xe6, 0x50, 0xd3, 0x57, 0x49, 0x9f, 0xa9, 0x41, 0xec, 0x1a, 0xe1, 0x36, - 0x8b, 0x66, 0xdd, 0xdb, 0x35, 0xbb, 0x82, 0x68, 0xbf, 0x10, 0xbe, 0xf4, 0x28, 0xf8, 0x6a, 0xdd, - 0x07, 0x53, 0x80, 0x01, 0xcf, 0x1b, 0xc0, 0x05, 0x79, 0x82, 0xb3, 0xa1, 0x81, 0xe7, 0xd0, 0x3c, - 0xba, 0x32, 0xb9, 0x7c, 0x5d, 0x8f, 0x53, 0xea, 0x61, 0x4a, 0x35, 0xd0, 0xbd, 0x9a, 0xa5, 0xcb, - 0x94, 0x7a, 0x54, 0x98, 0x30, 0xa5, 0x1e, 0x46, 0x31, 0xe2, 0x78, 0x64, 0x1b, 0x9f, 0xed, 0x64, - 0x7b, 0xe0, 0xa9, 0x42, 0xe4, 0x46, 0x54, 0x82, 0x15, 0xbd, 0x53, 0x09, 0x3d, 0x59, 0x89, 0x38, - 0xb6, 0xac, 0x84, 0xde, 0x2c, 0xea, 0x29, 0x57, 0x23, 0x1d, 0x49, 0xfb, 0x8e, 0x30, 0x09, 0x13, - 0x6d, 0x80, 0x08, 0x7f, 0x47, 0xc3, 0x53, 0xe1, 0xec, 0x7d, 0xd3, 0x01, 0xf5, 0x47, 0x59, 0x23, - 0x35, 0x47, 0x0a, 0x38, 0x2b, 0x9f, 0xdc, 0x33, 0x2b, 0xa0, 0x88, 0xb2, 0x46, 0x3c, 0x41, 0x36, - 0x31, 0xde, 0x00, 0x11, 0x02, 0x8f, 0x2a, 0xe0, 0xa5, 0xfe, 0x80, 0x63, 0x3f, 0x23, 0x11, 0x43, - 0x7b, 0x8d, 0xf0, 0x85, 0x10, 0xa0, 0x64, 0xf3, 0x88, 0x35, 0xc5, 0x81, 0x0e, 0x73, 0x94, 0xf1, - 0xa4, 0xfc, 0x38, 0x5d, 0xb9, 0x62, 0x7f, 0x20, 0x09, 0x47, 0x23, 0x19, 0x45, 0xdb, 0x8e, 0x65, - 0xf0, 0xd0, 0xab, 0x26, 0x64, 0x30, 0x70, 0xdd, 0xb4, 0x37, 0x89, 0x05, 0x29, 0x31, 0xeb, 0xf4, - 0x16, 0x24, 0x87, 0xc7, 0x37, 0x59, 0x55, 0x39, 0x8f, 0x2a, 0x5b, 0xf8, 0x2a, 0xfd, 0xd6, 0x99, - 0x2b, 0x4c, 0x59, 0x85, 0xdc, 0x58, 0xc7, 0x2f, 0x9a, 0xd0, 0x7e, 0x24, 0x34, 0x7f, 0x0b, 0xea, - 0x70, 0x8a, 0x3f, 0x2b, 0x85, 0xdd, 0x09, 0x99, 0xd6, 0x49, 0x9f, 0xc2, 0x4e, 0xb9, 0x1a, 0xe9, - 0x48, 0x5a, 0x0e, 0x4f, 0x1f, 0xa6, 0xe6, 0x1e, 0x73, 0x39, 0x68, 0x2e, 0x9e, 0x28, 0x31, 0xeb, - 0xb6, 0x2b, 0xfc, 0x7d, 0x59, 0x94, 0x0a, 0x73, 0x05, 0xb8, 0x22, 0xa0, 0x0f, 0x5f, 0xc9, 0x1d, - 0x9c, 0x15, 0xb6, 0x03, 0x65, 0x61, 0x3a, 0x5e, 0xa0, 0x9a, 0x85, 0xfe, 0xb0, 0xb6, 0x6c, 0x07, - 0x8c, 0xd8, 0x79, 0xf9, 0xe3, 0x14, 0x3e, 0x17, 0xa2, 0x94, 0xc1, 0x6f, 0xda, 0x15, 0x20, 0xaf, - 0x10, 0xce, 0x74, 0x36, 0x22, 0x99, 0x8b, 0x9b, 0x40, 0xcf, 0xd6, 0x92, 0x1f, 0xac, 0x8f, 0x68, - 0x85, 0x97, 0x7f, 0xfe, 0xbd, 0x1f, 0x99, 0xd6, 0xce, 0xab, 0x76, 0xda, 0x2c, 0x46, 0x6d, 0x8e, - 0xaf, 0xa1, 0x05, 0xf2, 0x01, 0xe1, 0xd1, 0x0d, 0x10, 0xa4, 0xd0, 0x4d, 0x11, 0xb7, 0x83, 0x41, - 0x11, 0x56, 0x15, 0x82, 0x4e, 0x16, 0xbb, 0x10, 0x68, 0x2b, 0x12, 0x43, 0x9b, 0xb6, 0x92, 0xca, - 0x69, 0x93, 0xb7, 0x08, 0x8f, 0xc9, 0x4d, 0x47, 0x66, 0xba, 0xd9, 0x12, 0xfb, 0x3f, 0x7f, 0x63, - 0x20, 0x38, 0x19, 0x49, 0xbb, 0xac, 0x00, 0xe7, 0xc8, 0xcc, 0xb1, 0x80, 0xe4, 0x05, 0xc2, 0x99, - 0x8e, 0x98, 0x7a, 0xad, 0x5a, 0x6a, 0x73, 0xe4, 0xe7, 0x8f, 0xfe, 0x20, 0xd0, 0x61, 0x50, 0x95, - 0x85, 0x93, 0x55, 0xe5, 0x13, 0xc2, 0x67, 0x0c, 0x90, 0xda, 0xed, 0x81, 0x90, 0x6a, 0x46, 0x83, - 0xae, 0xda, 0x55, 0xc5, 0x57, 0xcc, 0x9f, 0x88, 0x4f, 0x6a, 0xea, 0x2b, 0xc2, 0x13, 0x06, 0xf0, - 0xc6, 0x8e, 0x63, 0x8b, 0xe1, 0xa5, 0xfc, 0x8c, 0x70, 0x46, 0x52, 0x3a, 0x30, 0xbc, 0x8c, 0x5f, - 0x10, 0x1e, 0x2f, 0x37, 0xb8, 0x07, 0x6e, 0x75, 0x78, 0x21, 0xbf, 0x21, 0x9c, 0xdd, 0x02, 0xdf, - 0xb1, 0xdd, 0x23, 0xda, 0xd9, 0x70, 0x60, 0xbe, 0x43, 0xea, 0x00, 0x2c, 0x31, 0x8b, 0xf7, 0xea, - 0x76, 0xf1, 0x59, 0x9b, 0x27, 0xb1, 0x35, 0x3c, 0x28, 0xb4, 0xb2, 0x4a, 0x7b, 0x8f, 0xdc, 0x3d, - 0x9c, 0xf6, 0x98, 0xac, 0xd4, 0x63, 0x55, 0x4e, 0x5b, 0xc1, 0x31, 0xdb, 0xa6, 0x75, 0x66, 0x71, - 0xda, 0x8a, 0x4e, 0xd6, 0xf6, 0x12, 0xba, 0xb9, 
0xf6, 0xf3, 0x60, 0x16, 0xfd, 0x3e, 0x98, 0x45, - 0x7f, 0x0f, 0x66, 0xd1, 0xe3, 0xc5, 0x23, 0xef, 0xa8, 0x3d, 0x2e, 0xd5, 0x3b, 0x19, 0x75, 0x27, - 0x5d, 0xf9, 0x1f, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x30, 0x03, 0x3e, 0x72, 0x0b, 0x00, 0x00, + // 796 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x96, 0xcf, 0x6e, 0xd3, 0x4e, + 0x10, 0xc7, 0xb5, 0x6d, 0x7f, 0x69, 0xb2, 0x6d, 0xf5, 0x13, 0x0b, 0x54, 0x91, 0x95, 0xfe, 0x91, + 0x25, 0xa4, 0xaa, 0xaa, 0xec, 0xa6, 0xad, 0x04, 0xaa, 0x84, 0x50, 0x29, 0x28, 0x1c, 0x02, 0x54, + 0x4e, 0x11, 0x2a, 0x9c, 0xdc, 0x64, 0x70, 0x4d, 0x62, 0xaf, 0xf1, 0x6e, 0x52, 0x95, 0xaa, 0x07, + 0x38, 0x54, 0x5c, 0x80, 0x03, 0x17, 0x8e, 0xfc, 0x3b, 0xf0, 0x00, 0x3c, 0x04, 0x47, 0x24, 0x5e, + 0x00, 0x55, 0x5c, 0x79, 0x07, 0xb4, 0x6b, 0x3b, 0xb6, 0x9b, 0xb4, 0x4a, 0x49, 0x0f, 0xb9, 0xed, + 0xce, 0x7a, 0x66, 0x3e, 0xfe, 0xee, 0xec, 0xec, 0x62, 0xb5, 0xea, 0xd4, 0x74, 0x06, 0x7e, 0x0b, + 0x7c, 0x7d, 0x97, 0xfa, 0xf5, 0x27, 0x0d, 0xba, 0xdb, 0x1e, 0x68, 0x9e, 0x4f, 0x39, 0x25, 0xd9, + 0x68, 0xae, 0x5c, 0xb2, 0xa8, 0x45, 0xa5, 0x51, 0x17, 0xa3, 0x60, 0x5d, 0x29, 0x58, 0x94, 0x5a, + 0x0d, 0xd0, 0x4d, 0xcf, 0xd6, 0x4d, 0xd7, 0xa5, 0xdc, 0xe4, 0x36, 0x75, 0x59, 0xb8, 0xba, 0x52, + 0xbf, 0xc6, 0x34, 0x9b, 0x8a, 0x55, 0xc7, 0xac, 0xee, 0xd8, 0x2e, 0xf8, 0x7b, 0xba, 0x57, 0xb7, + 0x84, 0x81, 0xe9, 0x0e, 0x70, 0x53, 0x6f, 0x15, 0x75, 0x0b, 0x5c, 0xf0, 0x4d, 0x0e, 0xb5, 0xd0, + 0x6b, 0xdd, 0xb2, 0xf9, 0x4e, 0x73, 0x5b, 0xab, 0x52, 0x47, 0x37, 0x7d, 0x99, 0xf4, 0xa9, 0x1c, + 0xc4, 0xae, 0x6d, 0xdc, 0x56, 0xd1, 0x6c, 0x78, 0x3b, 0x66, 0x47, 0x10, 0xf5, 0x0f, 0xc2, 0x97, + 0x1f, 0x86, 0x5f, 0xad, 0xfb, 0x60, 0x72, 0x30, 0xe0, 0x59, 0x13, 0x18, 0x27, 0x05, 0x9c, 0xbb, + 0x67, 0x3a, 0xc0, 0x3c, 0xb3, 0x0a, 0x79, 0x34, 0x8b, 0xe6, 0x72, 0x46, 0x6c, 0x20, 0x8f, 0x71, + 0x2e, 0x72, 0x63, 0xf9, 0xa1, 0x59, 0x34, 0x37, 0xb6, 0x74, 0x5d, 0x8b, 0x81, 0xb4, 0x08, 0x48, + 0x0e, 0x34, 0xaf, 0x6e, 0x69, 0x02, 0x48, 0x6b, 0xcb, 0x16, 0x01, 0x69, 0x51, 0x14, 0x23, 0x8e, + 0x47, 0xb6, 0xf0, 0x44, 0xc0, 0x72, 0xdf, 0x93, 0x32, 0xe5, 0x87, 0x65, 0x82, 0x65, 0x2d, 0xd0, + 0x49, 0x4b, 0xea, 0x14, 0xc7, 0x16, 0x3a, 0x69, 0xad, 0xa2, 0x96, 0x72, 0x35, 0xd2, 0x91, 0xd4, + 0xaf, 0x08, 0x93, 0x28, 0x51, 0x09, 0x78, 0xf4, 0xb3, 0x2a, 0x1e, 0x8f, 0xac, 0xe2, 0x1f, 0xc3, + 0xff, 0x4d, 0xd9, 0xd2, 0x82, 0x0c, 0x1d, 0x17, 0x64, 0x03, 0xe3, 0x12, 0xf0, 0x34, 0xf0, 0x62, + 0x6f, 0xc0, 0xb1, 0x9f, 0x91, 0x88, 0xa1, 0xbe, 0x42, 0xf8, 0x62, 0x04, 0x50, 0xb6, 0x19, 0xef, + 0x6d, 0x63, 0x2a, 0x78, 0x4c, 0x7c, 0x1c, 0x81, 0x04, 0x5b, 0x53, 0xec, 0x0d, 0x24, 0xe1, 0x68, + 0x24, 0xa3, 0xa8, 0xcd, 0xb8, 0x48, 0x1e, 0x78, 0xb5, 0x44, 0x91, 0xf4, 0xaf, 0x9b, 0x82, 0xb3, + 0x77, 0xc1, 0xa1, 0xf6, 0x73, 0xa8, 0x49, 0xd5, 0xb2, 0x46, 0x7b, 0xae, 0xbe, 0x49, 0x6c, 0x56, + 0x99, 0x5a, 0xe7, 0x97, 0x34, 0x8f, 0x47, 0x37, 0x68, 0x4d, 0x3a, 0x0f, 0xcb, 0xb5, 0x68, 0x2a, + 0xfc, 0xd6, 0xa9, 0xcb, 0x4d, 0xa1, 0x50, 0x7e, 0x24, 0xf0, 0x6b, 0x1b, 0xd4, 0x6f, 0x89, 0xd3, + 0x72, 0x0b, 0x1a, 0x70, 0x9e, 0x42, 0x6c, 0xe1, 0x89, 0x20, 0xe4, 0x3f, 0x15, 0x7d, 0xca, 0xd5, + 0x48, 0x47, 0x52, 0xf3, 0x78, 0xf2, 0x38, 0x35, 0xf3, 0xa8, 0xcb, 0x40, 0x75, 0x71, 0xb6, 0x4c, + 0xad, 0xdb, 0x2e, 0xf7, 0xf7, 0x84, 0x28, 0x55, 0xea, 0x72, 0x70, 0x79, 0x48, 0x1f, 0x4d, 0xc9, + 0x1d, 0x9c, 0xe3, 0xb6, 0x03, 0x15, 0x6e, 0x3a, 0x5e, 0x58, 0x51, 0xf3, 0xbd, 0x61, 0x6d, 0xda, + 0x0e, 0x18, 0xb1, 0xf3, 0xd2, 0xeb, 0x71, 0xfc, 0x7f, 0x84, 0x52, 0x01, 0xbf, 0x65, 0x57, 0x81, + 0x1c, 0x22, 0x9c, 0x09, 0x0e, 
0x29, 0x99, 0x89, 0x1b, 0x44, 0xd7, 0xa6, 0xa4, 0xf4, 0xd7, 0x63, + 0xd4, 0xc2, 0xcb, 0x9f, 0xbf, 0xdf, 0x0d, 0x4d, 0xaa, 0x17, 0x64, 0x23, 0x6e, 0x15, 0xdb, 0x0d, + 0x92, 0xad, 0xa2, 0x79, 0xf2, 0x1e, 0xe1, 0xe1, 0x12, 0x70, 0x52, 0xe8, 0xa4, 0x88, 0x5b, 0x45, + 0xbf, 0x08, 0x2b, 0x12, 0x41, 0x23, 0x0b, 0x1d, 0x08, 0xfa, 0x7e, 0xbb, 0x18, 0x0e, 0xf4, 0xfd, + 0x64, 0xe5, 0x1c, 0x90, 0xb7, 0x08, 0x8f, 0x88, 0x03, 0x49, 0xa6, 0x3a, 0xd9, 0x12, 0xbd, 0x41, + 0x59, 0xeb, 0x0b, 0x4e, 0x44, 0x52, 0xaf, 0x48, 0xc0, 0x19, 0x32, 0x75, 0x2a, 0x20, 0x79, 0x81, + 0x70, 0x26, 0x28, 0xa6, 0x6e, 0xbb, 0x96, 0x3a, 0x1c, 0xca, 0xec, 0xc9, 0x1f, 0x84, 0x75, 0x18, + 0xaa, 0x32, 0x7f, 0x36, 0x55, 0x3e, 0x20, 0xfc, 0x9f, 0x01, 0xa2, 0x76, 0xbb, 0x20, 0xa4, 0x1a, + 0x55, 0xbf, 0xbb, 0x76, 0x55, 0xf2, 0x15, 0x95, 0x33, 0xf1, 0x89, 0x9a, 0xfa, 0x8c, 0x70, 0xd6, + 0x00, 0xd6, 0xdc, 0x76, 0x6c, 0x3e, 0xb8, 0x94, 0x1f, 0x11, 0xce, 0x08, 0x4a, 0x07, 0x06, 0x97, + 0xf1, 0x13, 0xc2, 0xa3, 0x95, 0x26, 0xf3, 0xc0, 0xad, 0x0d, 0x2e, 0xe4, 0x17, 0x84, 0x73, 0x9b, + 0xe0, 0x3b, 0xb6, 0x7b, 0x42, 0x3b, 0x1b, 0x0c, 0xcc, 0x43, 0x24, 0x2f, 0xc0, 0x32, 0xb5, 0x58, + 0xb7, 0x6e, 0x17, 0xdf, 0xb5, 0x0a, 0x89, 0x57, 0xa3, 0x8b, 0x42, 0x2d, 0xc9, 0xb4, 0x6b, 0xe4, + 0xc6, 0xf1, 0xb4, 0xa7, 0x64, 0xd5, 0x3d, 0x5a, 0x63, 0xfa, 0x7e, 0x78, 0xcd, 0x1e, 0xe8, 0x0d, + 0x6a, 0xb1, 0x45, 0x74, 0x73, 0xf5, 0xfb, 0xd1, 0x34, 0xfa, 0x71, 0x34, 0x8d, 0x7e, 0x1d, 0x4d, + 0xa3, 0x47, 0x0b, 0x27, 0xbe, 0x68, 0xbb, 0x3c, 0xc1, 0xb7, 0x33, 0xf2, 0x05, 0xbb, 0xfc, 0x37, + 0x00, 0x00, 0xff, 0xff, 0xa9, 0xf3, 0x79, 0xef, 0xa0, 0x0b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1024,7 +1041,7 @@ func (m *WorkflowCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintWorkflow(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } if m.Workflows != nil { { @@ -1036,6 +1053,13 @@ func (m *WorkflowCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintWorkflow(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x12 + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Namespace))) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil @@ -1164,6 +1188,16 @@ func (m *WorkflowUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.Memoized { + i-- + if m.Memoized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } if len(m.Namespace) > 0 { i -= len(m.Namespace) copy(dAtA[i:], m.Namespace) @@ -1379,6 +1413,10 @@ func (m *WorkflowCreateRequest) Size() (n int) { } var l int _ = l + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } if m.Workflows != nil { l = m.Workflows.Size() n += 1 + l + sovWorkflow(uint64(l)) @@ -1451,6 +1489,9 @@ func (m *WorkflowUpdateRequest) Size() (n int) { if l > 0 { n += 1 + l + sovWorkflow(uint64(l)) } + if m.Memoized { + n += 2 + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1577,6 +1618,38 @@ func (m *WorkflowCreateRequest) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + 
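+			// Editorial note: this loop decodes a protobuf varint. Each byte
+			// carries seven payload bits (least-significant group first) and
+			// the 0x80 high bit is a continuation flag, so the length is
+			// complete at the first byte with that bit clear.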
if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) } @@ -1612,7 +1685,7 @@ func (m *WorkflowCreateRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CreateOptions", wireType) } @@ -2042,6 +2115,26 @@ func (m *WorkflowUpdateRequest) Unmarshal(dAtA []byte) error { } m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Memoized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Memoized = bool(v != 0) default: iNdEx = preIndex skippy, err := skipWorkflow(dAtA[iNdEx:]) diff --git a/cmd/server/workflow/workflow.pb.gw.go b/cmd/server/workflow/workflow.pb.gw.go index 0dbd8ee811e5..a6a88a09cc02 100644 --- a/cmd/server/workflow/workflow.pb.gw.go +++ b/cmd/server/workflow/workflow.pb.gw.go @@ -773,6 +773,10 @@ func local_request_WorkflowService_Terminate_0(ctx context.Context, marshaler ru } +var ( + filter_WorkflowService_PodLogs_0 = &utilities.DoubleArray{Encoding: map[string]int{"Namespace": 0, "WorkflowName": 1, "PodName": 2}, Base: []int{1, 1, 2, 3, 0, 0, 0}, Check: []int{0, 1, 1, 1, 2, 3, 4}} +) + func request_WorkflowService_PodLogs_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (WorkflowService_PodLogsClient, runtime.ServerMetadata, error) { var protoReq WorkflowLogRequest var metadata runtime.ServerMetadata @@ -817,15 +821,11 @@ func request_WorkflowService_PodLogs_0(ctx context.Context, marshaler runtime.Ma return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "PodName", err) } - val, ok = pathParams["Container"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Container") + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - - protoReq.Container, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Container", err) + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_PodLogs_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } stream, err := client.PodLogs(ctx, &protoReq) @@ -1296,7 +1296,7 @@ var ( pattern_WorkflowService_Terminate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_PodLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6, 2, 7, 1, 0, 4, 1, 5, 8}, []string{"api", "v1", "workflow", "Namespace", 
"WorkflowName", "pods", "PodName", "logs", "Container"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_PodLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6, 2, 7}, []string{"api", "v1", "workflow", "Namespace", "WorkflowName", "pods", "PodName", "logs"}, "", runtime.AssumeColonVerbOpt(true))) ) var ( diff --git a/cmd/server/workflow/workflow.proto b/cmd/server/workflow/workflow.proto index 07c316896b93..b2cf2e2feea5 100644 --- a/cmd/server/workflow/workflow.proto +++ b/cmd/server/workflow/workflow.proto @@ -13,8 +13,9 @@ import "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1/generated.proto"; package workflow; message WorkflowCreateRequest{ - github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow Workflows =1; - k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions CreateOptions =2; + string Namespace = 1; + github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow Workflows = 2; + k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions CreateOptions = 3; } message WorkflowGetRequest{ @@ -31,6 +32,7 @@ message WorkflowListRequest{ message WorkflowUpdateRequest{ string WorkflowName =1; string Namespace = 2; + bool Memoized = 3; } message WorkflowLogRequest{ @@ -111,7 +113,7 @@ service WorkflowService { // PodLogs returns stream of log entries for the specified pod. Pod rpc PodLogs(WorkflowLogRequest) returns (stream LogEntry) { - option (google.api.http).get = "/api/v1/workflow/{Namespace}/{WorkflowName}/pods/{PodName}/logs/{Container}"; + option (google.api.http).get = "/api/v1/workflow/{Namespace}/{WorkflowName}/pods/{PodName}/logs"; } } \ No newline at end of file diff --git a/cmd/server/workflow/workflow.swagger.json b/cmd/server/workflow/workflow.swagger.json index 9c3378131396..466df7bf046c 100644 --- a/cmd/server/workflow/workflow.swagger.json +++ b/cmd/server/workflow/workflow.swagger.json @@ -16,7 +16,7 @@ "application/json" ], "paths": { - "/api/v1/workflow/{Namespace}/{WorkflowName}/pods/{PodName}/logs/{Container}": { + "/api/v1/workflow/{Namespace}/{WorkflowName}/pods/{PodName}/logs": { "get": { "summary": "PodLogs returns stream of log entries for the specified pod. 
Pod", "operationId": "PodLogs", @@ -49,8 +49,8 @@ }, { "name": "Container", - "in": "path", - "required": true, + "in": "query", + "required": false, "type": "string" } ], @@ -4022,6 +4022,9 @@ "workflowWorkflowCreateRequest": { "type": "object", "properties": { + "Namespace": { + "type": "string" + }, "Workflows": { "$ref": "#/definitions/v1alpha1Workflow" }, @@ -4041,6 +4044,10 @@ }, "Namespace": { "type": "string" + }, + "Memoized": { + "type": "boolean", + "format": "boolean" } } }, diff --git a/cmd/server/workflow/workflow_db_service.go b/cmd/server/workflow/workflow_db_service.go new file mode 100644 index 000000000000..3f0a73040c00 --- /dev/null +++ b/cmd/server/workflow/workflow_db_service.go @@ -0,0 +1,81 @@ +package workflow + +import ( + "github.com/argoproj/argo/errors" + "github.com/argoproj/argo/persist/sqldb" + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/workflow/config" + log "github.com/sirupsen/logrus" + "k8s.io/client-go/kubernetes" + dblib "upper.io/db.v3" +) + +type DBService struct { + wfDBctx sqldb.DBRepository +} + +func NewDBService(kubectlConfig kubernetes.Interface, namespace string, persistConfig *config.PersistConfig) (*DBService, error) { + var dbService DBService + var err error + dbService.wfDBctx, err = createDBContext(kubectlConfig, namespace, persistConfig) + if err != nil { + return nil, err + } + return &dbService, nil +} + +func createDBContext(kubectlConfig kubernetes.Interface, namespace string, persistConfig *config.PersistConfig) (*sqldb.WorkflowDBContext, error) { + var wfDBCtx sqldb.WorkflowDBContext + var err error + + wfDBCtx.Session, wfDBCtx.TableName, err = sqldb.CreateDBSession(kubectlConfig, namespace, persistConfig) + + if err != nil { + log.Errorf("Error in CreateDBContext. 
%v", err) + return nil, err + } + return &wfDBCtx, nil +} + +func (db *DBService) Get(wfName string, namespace string) (*v1alpha1.Workflow, error) { + if db.wfDBctx == nil { + return nil, errors.New(errors.CodeInternal, "DB Context is not initialized") + } + + cond := dblib.Cond{"name": wfName, "namespace": namespace} + + wfs, err := db.wfDBctx.Query(cond) + if err != nil { + return nil, err + } + if len(wfs) > 0 { + return &wfs[0], nil + } + return nil, nil +} + +func (db *DBService) List(namespace string, pageSize uint, lastId string) (*v1alpha1.WorkflowList, error) { + if db.wfDBctx == nil { + return nil, errors.New(errors.CodeInternal, "DB Context is not initialized") + } + + var wfList *v1alpha1.WorkflowList + + var err error + + var cond dblib.Cond + if namespace != "" { + cond = dblib.Cond{"namespace": namespace} + } + if pageSize == 0 { + wfList.Items, err = db.wfDBctx.Query(cond) + + } else { + wfList, err = db.wfDBctx.QueryWithPagination(cond, pageSize, lastId) + } + if err != nil { + return nil, err + } + + return wfList, err +} diff --git a/cmd/server/workflow/workflow_server.go b/cmd/server/workflow/workflow_server.go new file mode 100644 index 000000000000..a6ee26fdf83a --- /dev/null +++ b/cmd/server/workflow/workflow_server.go @@ -0,0 +1,312 @@ +package workflow + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/argoproj/argo/persist/sqldb" + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/pkg/client/clientset/versioned" + wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" + "github.com/argoproj/argo/workflow/config" + "github.com/argoproj/argo/workflow/util" + log "github.com/sirupsen/logrus" + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +type WorkflowServer struct { + Namespace string + WfClientset *versioned.Clientset + KubeClientset *kubernetes.Clientset + EnableClientAuth bool + Config *config.WorkflowControllerConfig + WfDBService *DBService + WfKubeService *KubeService +} + + + +func NewWorkflowServer(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset, config *config.WorkflowControllerConfig, enableClientAuth bool) (*WorkflowServer) { + + wfServer := WorkflowServer{Namespace: namespace, WfClientset: wfClientset, KubeClientset: kubeClientSet, EnableClientAuth: enableClientAuth} + var err error + wfServer.WfDBService.wfDBctx, err = wfServer.CreatePersistenceContext(namespace, kubeClientSet,config.Persistence) + + if err != nil { + log.Errorf("Error Creating DB Context. %v", err) + return nil + } + return &wfServer +} + + +func (s *WorkflowServer) CreatePersistenceContext(namespace string, kubeClientSet *kubernetes.Clientset, config *config.PersistConfig) (*sqldb.WorkflowDBContext, error) { + + var wfDBCtx sqldb.WorkflowDBContext + var err error + + //wfDBCtx.TableName = wfc.Config.Persistence.TableName + wfDBCtx.NodeStatusOffload = config.NodeStatusOffload + + wfDBCtx.Session, wfDBCtx.TableName, err = sqldb.CreateDBSession(kubeClientSet, namespace, config) + + if err != nil { + log.Errorf("Error in createPersistenceContext. 
%v", err) + return nil, err + } + + return &wfDBCtx, nil +} + +func (s *WorkflowServer) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubernetes.Clientset, error) { + + md, _ := metadata.FromIncomingContext(ctx) + + if s.EnableClientAuth { + return s.WfClientset, s.KubeClientset, nil + } + + var restConfigStr, bearerToken string + if len(md.Get(CLIENT_REST_CONFIG)) == 0 { + return nil,nil, errors.New("Client kubeconfig is not found") + } + restConfigStr = md.Get(CLIENT_REST_CONFIG)[0] + + if len(md.Get(AUTH_TOKEN)) > 0 { + bearerToken = md.Get(AUTH_TOKEN)[0] + } + + restConfig := rest.Config{} + + err := json.Unmarshal([]byte(restConfigStr), &restConfig) + if err != nil { + return nil, nil, err + } + restConfig.BearerToken = string(bearerToken) + + // create the clientset + wfClientset, err := wfclientset.NewForConfig(&restConfig) + + // create the clientset + clientset, err := kubernetes.NewForConfig(&restConfig) + + if err != nil { + log.Warnf("Failure to create WfClientset. ClientConfig: %s, Error: %s", restConfig, err) + return nil, nil, err + } + + return wfClientset, clientset, nil +} + +func (s *WorkflowServer) Create(ctx context.Context, in *WorkflowCreateRequest) (*v1alpha1.Workflow, error) { + + wfClient, _, err := s.GetWFClient(ctx) + if err != nil { + return nil, err + } + namespace := s.Namespace + if in.Namespace != "" { + namespace = in.Namespace + } + wf, err := s.WfKubeService.Create(wfClient,namespace,in.Workflows) + + if err != nil { + log.Warnf("Create request is failed. Error: %s", err) + return nil, err + } + log.Info("Workflow created successfully. Name: %s", wf.Name) + return wf, nil +} + +func (s *WorkflowServer) Get(ctx context.Context, in *WorkflowGetRequest) (*v1alpha1.Workflow, error) { + + namespace := s.Namespace + if in.Namespace != "" { + namespace = in.Namespace + } + wfClient, _, err := s.GetWFClient(ctx) + + if err != nil { + return nil, err + } + var wf *v1alpha1.Workflow + + if s.WfDBService != nil { + wf, err = s.WfDBService.Get(in.WorkflowName, in.Namespace) + }else { + wf, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + } + if err != nil { + return nil, err + } + + return wf, err +} + +func (s *WorkflowServer) List(ctx context.Context, in *WorkflowListRequest) (*v1alpha1.WorkflowList, error) { + + namespace := s.Namespace + + if in.Namespace != "" { + namespace = in.Namespace + } + wfClient, _, err := s.GetWFClient(ctx) + + if err != nil { + return nil, err + } + listOpt := in.ListOptions + var wfList *v1alpha1.WorkflowList + if s.WfDBService != nil { + wfList, err = s.WfDBService.List(namespace, uint(listOpt.Limit),"") + }else { + wfList, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).List(*listOpt) + } + if err != nil { + fmt.Println(err) + } + + return wfList, nil + +} + + + +func (s *WorkflowServer) Delete(ctx context.Context, in *WorkflowDeleteRequest) (*WorkflowDeleteResponse, error) { + namespace := s.Namespace + + if in.Namespace != "" { + namespace = in.Namespace + } + wfClient, _, err := s.GetWFClient(ctx) + + if err != nil { + return nil, err + } + err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Delete(in.WorkflowName, &v1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + return nil, err + } + //msgStr := fmt.Sprint("Workflow '%s' deleted\n", in.WorkflowName) + return nil, nil +} + +func (s *WorkflowServer) Retry(ctx context.Context, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + namespace := s.Namespace + if in.Namespace != "" { + namespace = 
in.Namespace + } + + wfClient, kubeClient, err := s.GetWFClient(ctx) + + if err != nil { + return nil, err + } + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + + if err != nil { + return nil, err + } + + wf, err = util.RetryWorkflow(kubeClient, wfClient.ArgoprojV1alpha1().Workflows(namespace), wf) + + if err != nil { + return nil, err + } + return wf, err +} + +func (s *WorkflowServer) Resubmit(ctx context.Context, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + namespace := s.Namespace + if in.Namespace != "" { + namespace = in.Namespace + } + + wfClient, _, err := s.GetWFClient(ctx) + + if err != nil { + return nil, err + } + + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + + newWF, err := util.FormulateResubmitWorkflow(wf, in.Memoized) + + created, err := util.SubmitWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfClient, namespace, newWF, nil) + + if err != nil { + fmt.Println(err) + return nil, err + } + + return created, err +} + +func (s *WorkflowServer) Resume(ctx context.Context, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + namespace := s.Namespace + if in.Namespace != "" { + namespace = in.Namespace + } + wfClient, _, err := s.GetWFClient(ctx) + + if err != nil { + return nil, err + } + + err = util.ResumeWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.WorkflowName) + if err != nil { + log.Warnf("Failed to resume %s: %+v", in.WorkflowName, err) + return nil, err + } + + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + + if err != nil { + return nil, err + } + return wf, nil +} + +func (s *WorkflowServer) Suspend(ctx context.Context, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + namespace := s.Namespace + if in.Namespace != "" { + namespace = in.Namespace + } + + wfClient, _, err := s.GetWFClient(ctx) + + if err != nil { + return nil, err + } + err = util.SuspendWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.WorkflowName) + + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + if err != nil { + return nil, err + } + return wf, nil +} + +func (s *WorkflowServer) Terminate(ctx context.Context, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + namespace := s.Namespace + if in.Namespace != "" { + namespace = in.Namespace + } + wfClient, _, err := s.GetWFClient(ctx) + + if err != nil { + return nil, err + } + err = util.TerminateWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.WorkflowName) + + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + if err != nil { + return nil, err + } + return wf, nil +} \ No newline at end of file diff --git a/cmd/server/workflow/workflow_service.go b/cmd/server/workflow/workflow_service.go index df17c262d6ca..ac724a6128d2 100644 --- a/cmd/server/workflow/workflow_service.go +++ b/cmd/server/workflow/workflow_service.go @@ -16,18 +16,18 @@ import ( "k8s.io/client-go/rest" ) -type Server struct { +type KubeService struct { Namespace string WfClientset *versioned.Clientset KubeClientset *kubernetes.Clientset EnableClientAuth bool } -func NewServer(Namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset, enableClientAuth bool) WorkflowServiceServer { - return &Server{Namespace: Namespace, WfClientset: wfClientset, KubeClientset: kubeClientSet, EnableClientAuth: 
enableClientAuth} +func NewKubeServer(Namespace string, wfClientset *wfclientset.Clientset, kubeClientSet *kubernetes.Clientset, enableClientAuth bool) *KubeService { + return &KubeService{Namespace: Namespace, WfClientset: wfClientset, KubeClientset: kubeClientSet, EnableClientAuth: enableClientAuth} } -func (s *Server) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubernetes.Clientset, error) { +func (s *KubeService) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubernetes.Clientset, error) { md, _ := metadata.FromIncomingContext(ctx) @@ -37,7 +37,7 @@ func (s *Server) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubern var restConfigStr, bearerToken string if len(md.Get(CLIENT_REST_CONFIG)) == 0 { - return nil,nil, errors.New("Client kubeconfig is not found") + return nil, nil, errors.New("Client kubeconfig is not found") } restConfigStr = md.Get(CLIENT_REST_CONFIG)[0] @@ -53,7 +53,6 @@ func (s *Server) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubern } restConfig.BearerToken = string(bearerToken) - fmt.Println(restConfigStr) // create the clientset wfClientset, err := wfclientset.NewForConfig(&restConfig) @@ -68,17 +67,8 @@ func (s *Server) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubern return wfClientset, clientset, nil } -func (s *Server) Create(ctx context.Context, in *v1alpha1.Workflow) (*v1alpha1.Workflow, error) { +func (s *KubeService) Create(wfClient *versioned.Clientset, namespace string, in *v1alpha1.Workflow) (*v1alpha1.Workflow, error) { - wfClient, _, err := s.GetWFClient(ctx) - if err != nil { - return nil, err - } - - namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace - } wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Create(in) if err != nil { log.Warnf("Create request is failed. 
Error: %s", err) @@ -88,19 +78,9 @@ func (s *Server) Create(ctx context.Context, in *v1alpha1.Workflow) (*v1alpha1.W return wf, nil } -func (s *Server) Get(ctx context.Context, in *WorkflowGetRequest) (*v1alpha1.Workflow, error) { +func (s *KubeService) Get(wfClient *versioned.Clientset, namespace string, in *WorkflowGetRequest) (*v1alpha1.Workflow, error) { - namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace - } - wfClient, _, err := s.GetWFClient(ctx) - - if err != nil { - return nil, err - } - - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.Wfname, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err @@ -109,18 +89,7 @@ func (s *Server) Get(ctx context.Context, in *WorkflowGetRequest) (*v1alpha1.Wor return wf, err } -func (s *Server) List(ctx context.Context, in *WorkflowListRequest) (*v1alpha1.WorkflowList, error) { - - namespace := s.Namespace - - if in.Namespace != "" { - namespace = in.Namespace - } - wfClient, _, err := s.GetWFClient(ctx) - - if err != nil { - return nil, err - } +func (s *KubeService) List(wfClient *versioned.Clientset, namespace string, in *WorkflowListRequest) (*v1alpha1.WorkflowList, error) { wfList, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).List(v1.ListOptions{}) if err != nil { @@ -131,40 +100,20 @@ func (s *Server) List(ctx context.Context, in *WorkflowListRequest) (*v1alpha1.W } +func (s *KubeService) Delete(wfClient *versioned.Clientset, namespace string, in *WorkflowDeleteRequest) (*WorkflowDeleteResponse, error) { - -func (s *Server) Delete(ctx context.Context, in *WorkflowDeleteRequest) (*WorkflowDeleteResponse, error) { - namespace := s.Namespace - - if in.Namespace != "" { - namespace = in.Namespace - } - wfClient, _, err := s.GetWFClient(ctx) - - if err != nil { - return nil, err - } - err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Delete(in.WorkflowName, &v1.DeleteOptions{}) + err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Delete(in.WorkflowName, &v1.DeleteOptions{}) if err != nil { log.Fatal(err) return nil, err } - - return fmt.Sprint("Workflow '%s' deleted\n", in.WorkflowName), nil + //fmt.Sprint("Workflow '%s' deleted\n", in.WorkflowName) + return nil , nil } -func (s *Server) Retry(ctx context.Context, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace - } +func (s *KubeService) Retry(wfClient *versioned.Clientset, kubeClient *kubernetes.Clientset, namespace string, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - wfClient, kubeClient, err := s.GetWFClient(ctx) - - if err != nil { - return nil, err - } - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.Wfname, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err @@ -178,19 +127,9 @@ func (s *Server) Retry(ctx context.Context, in *WorkflowUpdateRequest) (*v1alpha return wf, err } -func (s *Server) Resubmit(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) { - namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace - } +func (s *KubeService) Resubmit(wfClient *versioned.Clientset, namespace string, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - wfClient, _, err := s.GetWFClient(ctx) - - if err != nil { - return nil, err - } - - wf, err := 
wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.Workflow.Name, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) newWF, err := util.FormulateResubmitWorkflow(wf, in.Memoized) @@ -204,24 +143,15 @@ func (s *Server) Resubmit(ctx context.Context, in *WorkflowUpdateQuery) (*v1alph return created, err } -func (s *Server) Resume(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) { - namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace - } - wfClient, _, err := s.GetWFClient(ctx) - - if err != nil { - return nil, err - } +func (s *KubeService) Resume(wfClient *versioned.Clientset, namespace string, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - err = util.ResumeWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.Wfname) + err := util.ResumeWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.WorkflowName) if err != nil { - log.Warnf("Failed to resume %s: %+v", in.Wfname, err) + log.Warnf("Failed to resume %s: %+v", in.WorkflowName, err) return nil, err } - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.Wfname, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err @@ -229,41 +159,28 @@ func (s *Server) Resume(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1 return wf, nil } -func (s *Server) Suspend(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) { - namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace - } +func (s *KubeService) Suspend(wfClient *versioned.Clientset, namespace string, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - wfClient, _, err := s.GetWFClient(ctx) + err := util.SuspendWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.WorkflowName) - if err != nil { - return nil, err - } - err = util.SuspendWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.Wfname) - - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.Wfname, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } return wf, nil } -func (s *Server) Terminate(ctx context.Context, in *WorkflowUpdateQuery) (*v1alpha1.Workflow, error) { - namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace - } - wfClient, _, err := s.GetWFClient(ctx) +func (s *KubeService) Terminate(wfClient *versioned.Clientset, namespace string, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - if err != nil { - return nil, err - } - err = util.TerminateWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.Wfname) + err := util.TerminateWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.WorkflowName) - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.Wfname, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } return wf, nil } + +func (s *WorkflowServer) PodLogs(*WorkflowLogRequest, WorkflowService_PodLogsServer) error { + panic("implement me") +} \ No newline at end of file diff --git a/workflow/persist/sqldb/mocks/DBRepository.go b/persist/sqldb/mocks/DBRepository.go similarity index 100% rename from workflow/persist/sqldb/mocks/DBRepository.go rename to persist/sqldb/mocks/DBRepository.go diff --git 
a/workflow/persist/sqldb/sqldb.go b/persist/sqldb/sqldb.go similarity index 100% rename from workflow/persist/sqldb/sqldb.go rename to persist/sqldb/sqldb.go diff --git a/workflow/persist/sqldb/workflow_repository.go b/persist/sqldb/workflow_repository.go similarity index 79% rename from workflow/persist/sqldb/workflow_repository.go rename to persist/sqldb/workflow_repository.go index 06fc2e600ae4..917444a7e46d 100644 --- a/workflow/persist/sqldb/workflow_repository.go +++ b/persist/sqldb/workflow_repository.go @@ -3,6 +3,7 @@ package sqldb import ( "context" "encoding/json" + "upper.io/db.v3" "strings" "time" @@ -24,10 +25,12 @@ type ( DBRepository interface { Save(wf *wfv1.Workflow) error Get(uid string) (*wfv1.Workflow, error) - List() ([]wfv1.Workflow, error) - Query(condition interface{}) ([]wfv1.Workflow, error) + List(orderBy interface{}) (*wfv1.WorkflowList, error) + Query(condition db.Cond, orderBy ...interface{}) ([]wfv1.Workflow, error) Close() error IsNodeStatusOffload() bool + QueryWithPagination(condition db.Cond, pageSize uint, lastID string, orderBy ...interface{})(*wfv1.WorkflowList, error) + } ) @@ -149,37 +152,49 @@ func (wdc *WorkflowDBContext) update(wfDB *WorkflowDB) error { } func (wdc *WorkflowDBContext) Get(uid string) (*wfv1.Workflow, error) { - var wfDB WorkflowDB - var wf wfv1.Workflow + if wdc.Session == nil { return nil, DBInvalidSession(nil, "DB session is not initiallized") } + cond := db.Cond{"id":uid} + + wfs, err := wdc.Query(cond) - err := wdc.Session.Collection(wdc.TableName).Find("id", uid).One(&wfDB) if err != nil { return nil, DBOperationError(err, "DB GET operation failed") } - if wfDB.Id != "" { - err := json.Unmarshal([]byte(wfDB.Workflow), &wf) - if err != nil { - log.Warnf(" Workflow unmarshalling failed for row=%v", wfDB) - } - } else { + + if len(wfs) >0 { + return &wfs[0], nil + } return nil, DBOperationError(nil, "Row is not found") +} + +func (wdc *WorkflowDBContext) List(orderBy interface{}) (*wfv1.WorkflowList, error) { + if wdc.Session == nil { + return nil, DBInvalidSession(nil, "DB session is not initialized") } - return &wf, nil + wfs, err := wdc.Query(nil, orderBy) + + if err != nil { + return nil, err + } + var wfList wfv1.WorkflowList + wfList.Items = wfs + + return &wfList, nil } -func (wdc *WorkflowDBContext) List() ([]wfv1.Workflow, error) { - var wfDBs []WorkflowDB +func (wdc *WorkflowDBContext) Query(condition db.Cond, orderBy ...interface{} ) ([]wfv1.Workflow, error) { + var wfDBs []WorkflowDB if wdc.Session == nil { return nil, DBInvalidSession(nil, "DB session is not initialized") } - if err := wdc.Session.Collection(wdc.TableName).Find().OrderBy(" startedAt DESC").All(&wfDBs); err != nil { - return nil, DBOperationError(err, "DB List operation failed") + if err := wdc.Session.Collection(wdc.TableName).Find(condition).OrderBy(orderBy).All(&wfDBs); err != nil { + return nil, DBOperationError(err, "DB Query opeartion failed") } var wfs []wfv1.Workflow for _, wfDB := range wfDBs { @@ -194,15 +209,24 @@ func (wdc *WorkflowDBContext) List() ([]wfv1.Workflow, error) { return wfs, nil } -func (wdc *WorkflowDBContext) Query(condition interface{}) ([]wfv1.Workflow, error) { +func (wdc *WorkflowDBContext) Close() error { + if wdc.Session == nil { + return DBInvalidSession(nil, "DB session is not initialized") + } + return wdc.Session.Close() +} + + +func (wdc *WorkflowDBContext) QueryWithPagination(condition db.Cond, pageLimit uint, lastId string, orderBy ...interface{} ) (*wfv1.WorkflowList, error) { var wfDBs []WorkflowDB if 
wdc.Session == nil { return nil, DBInvalidSession(nil, "DB session is not initialized") } - if err := wdc.Session.Collection(wdc.TableName).Find(condition).OrderBy(" startedAt DESC").All(&wfDBs); err != nil { + if err := wdc.Session.Collection(wdc.TableName).Find(condition).OrderBy(orderBy).Paginate(pageLimit).NextPage(lastId).All(&wfDBs); err != nil { return nil, DBOperationError(err, "DB Query opeartion failed") } + var wfs []wfv1.Workflow for _, wfDB := range wfDBs { var wf wfv1.Workflow @@ -213,12 +237,9 @@ func (wdc *WorkflowDBContext) Query(condition interface{}) ([]wfv1.Workflow, err wfs = append(wfs, wf) } } - return wfs, nil -} -func (wdc *WorkflowDBContext) Close() error { - if wdc.Session == nil { - return DBInvalidSession(nil, "DB session is not initialized") - } - return wdc.Session.Close() -} + var wfList wfv1.WorkflowList + wfList.Items = wfs + + return &wfList, nil +} \ No newline at end of file diff --git a/pkg/apis/workflow/v1alpha1/generated.proto b/pkg/apis/workflow/v1alpha1/generated.proto index a606b5d2a023..8b0f4e4cb585 100644 --- a/pkg/apis/workflow/v1alpha1/generated.proto +++ b/pkg/apis/workflow/v1alpha1/generated.proto @@ -668,6 +668,9 @@ message Template { // +patchMergeKey=ip repeated k8s.io.api.core.v1.HostAlias hostAliases = 29; + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional optional k8s.io.api.core.v1.PodSecurityContext securityContext = 30; // PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of @@ -844,7 +847,6 @@ message WorkflowSpec { // +optional optional string schedulerName = 21; - // PodGC describes the strategy to use when to deleting completed pods optional PodGC podGC = 22; // PriorityClassName to apply to workflow pods. @@ -857,9 +859,6 @@ message WorkflowSpec { // +patchMergeKey=ip repeated k8s.io.api.core.v1.HostAlias hostAliases = 25; - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. - // +optiona optional k8s.io.api.core.v1.PodSecurityContext securityContext = 26; // PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index 5883dbae9a49..8cffc24b732a 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -2216,7 +2216,8 @@ func schema_pkg_apis_workflow_v1alpha1_Template(ref common.ReferenceCallback) co }, "securityContext": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), + Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. 
See type description for default values of each field.", + Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), }, }, "podSpecPatch": { @@ -2879,8 +2880,7 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback }, "podGC": { SchemaProps: spec.SchemaProps{ - Description: "PodGC describes the strategy to use when to deleting completed pods", - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.PodGC"), + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.PodGC"), }, }, "podPriorityClassName": { @@ -2917,8 +2917,7 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback }, "securityContext": { SchemaProps: spec.SchemaProps{ - Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", - Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), + Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), }, }, "podSpecPatch": { diff --git a/pkg/apis/workflow/v1alpha1/workflow_types.go b/pkg/apis/workflow/v1alpha1/workflow_types.go index 326e9509047c..d33c7505561a 100644 --- a/pkg/apis/workflow/v1alpha1/workflow_types.go +++ b/pkg/apis/workflow/v1alpha1/workflow_types.go @@ -83,7 +83,6 @@ type TemplateHolder interface { // +genclient:noStatus // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type Workflow struct { - metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` Spec WorkflowSpec `json:"spec" protobuf:"bytes,2,opt,name=spec "` @@ -137,7 +136,6 @@ type WorkflowSpec struct { // +patchMergeKey=name VolumeClaimTemplates []apiv1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,opt,name=volumeClaimTemplates"` - // Parallelism limits the max total parallel pods that can execute at the same time in a workflow Parallelism *int64 `json:"parallelism,omitempty" protobuf:"bytes,7,opt,name=parallelism"` @@ -212,18 +210,6 @@ type WorkflowSpec struct { // PodGC describes the strategy to use when to deleting completed pods - PodGC *PodGC `json:"podGC,omitempty" protobuf:"bytes,26,opt,name=podGC"` - - // PriorityClassName to apply to workflow pods. - PodPriorityClassName string `json:"podPriorityClassName,omitempty" protobuf:"bytes,22,opt,name=podPriorityClassName"` - - // Priority to apply to workflow pods. - PodPriority *int32 `json:"podPriority,omitempty" protobuf:"bytes,23,opt,name=podPriority"` - - // +patchStrategy=merge - // +patchMergeKey=ip - HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,24,opt,name=hostAliases"` - PodGC *PodGC `json:"podGC,omitempty" protobuf:"bytes,22,opt,name=podGC"` // PriorityClassName to apply to workflow pods. 
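Reviewer's note on the hunk above: it removes a duplicated field block,
leaving PodGC at protobuf tag 22 where the discarded copy used tag 26.
Protobuf identifies fields on the wire by tag number, not by name, so this
kind of renumbering is only safe while no client compiled against the old
numbering is still exchanging these messages. A minimal sketch of the hazard,
in this file's struct-tag style; the pair below is illustrative only, not
additional patch content:

	// Written by an old binary: PodGC is serialized under tag 26.
	PodGC *PodGC `json:"podGC,omitempty" protobuf:"bytes,26,opt,name=podGC"`

	// Read by a new binary that expects tag 22: the old bytes arrive as an
	// unknown field and the decoded PodGC stays nil.
	PodGC *PodGC `json:"podGC,omitempty" protobuf:"bytes,22,opt,name=podGC"`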
@@ -262,25 +248,6 @@ func (p *ParallelSteps) UnmarshalJSON(value []byte) error { func (p *ParallelSteps) MarshalJSON() ([]byte, error) { fmt.Println(p.Steps) return json.Marshal(p.Steps) - -} - -type ParallelSteps struct { - Steps []WorkflowStep `protobuf:"bytes,1,rep,name=steps"` -} - -func (p *ParallelSteps) UnmarshalJSON(value []byte) error { - err := json.Unmarshal(value, &p.Steps) - if err != nil { - return err - } - return nil -} - -func (p *ParallelSteps) MarshalJSON() ([]byte, error) { - fmt.Println(p.Steps) - return json.Marshal(p.Steps) - } func (wfs *WorkflowSpec) HasPodSpecPatch() bool { @@ -379,7 +346,6 @@ type Template struct { // +patchMergeKey=key Tolerations []apiv1.Toleration `json:"tolerations,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,24,opt,name=tolerations"` - // If specified, the pod will be dispatched by specified scheduler. // Or it will be dispatched by workflow scope scheduler if specified. // If neither specified, the pod will be dispatched by default scheduler. @@ -407,7 +373,6 @@ type Template struct { // +patchMergeKey=ip HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,29,opt,name=hostAliases"` - // SecurityContext holds pod-level security attributes and common container settings. // Optional: Defaults to empty. See type description for default values of each field. // +optional @@ -416,7 +381,6 @@ type Template struct { // PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of // container fields which are not strings (e.g. resource limits). PodSpecPatch string `json:"podSpecPatch,omitempty" protobuf:"bytes,31,opt,name=podSpecPatch"` - } var _ TemplateHolder = &Template{} @@ -459,7 +423,6 @@ type Inputs struct { // +patchStrategy=merge // +patchMergeKey=name Artifacts []Artifact `json:"artifacts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,opt,name=artifacts"` - } // Pod metdata diff --git a/workflow/controller/controller.go b/workflow/controller/controller.go index bc051ad47876..51139e5c0427 100644 --- a/workflow/controller/controller.go +++ b/workflow/controller/controller.go @@ -26,11 +26,11 @@ import ( "k8s.io/client-go/util/workqueue" "github.com/argoproj/argo" + "github.com/argoproj/argo/persist/sqldb" wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" "github.com/argoproj/argo/workflow/common" "github.com/argoproj/argo/workflow/config" "github.com/argoproj/argo/workflow/metrics" - "github.com/argoproj/argo/workflow/persist/sqldb" "github.com/argoproj/argo/workflow/ttlcontroller" "github.com/argoproj/argo/workflow/util" ) diff --git a/workflow/controller/operator_persist_test.go b/workflow/controller/operator_persist_test.go index cc13973280b0..a5e24b3536e9 100644 --- a/workflow/controller/operator_persist_test.go +++ b/workflow/controller/operator_persist_test.go @@ -7,9 +7,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/argoproj/argo/persist/sqldb" + "github.com/argoproj/argo/persist/sqldb/mocks" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo/workflow/persist/sqldb" - "github.com/argoproj/argo/workflow/persist/sqldb/mocks" ) func getMockDBCtx(expectedResullt interface{}, largeWfSupport bool, isInterfaceNil bool) sqldb.DBRepository { From 0987a21adceab357befbf64fac9d5e6cae16d96b Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian Date: Wed, 13 Nov 2019 11:38:04 -0800 
Subject: [PATCH 008/421] Added GRPC GW changes --- Gopkg.lock | 40 + cmd/client/client.go | 8 +- cmd/server/apiserver/argoserver.go | 204 ++-- cmd/server/main.go | 11 +- cmd/server/workflow/workflow.pb.go | 1109 ++++++++++++++++++-- cmd/server/workflow/workflow.pb.gw.go | 176 +++- cmd/server/workflow/workflow.proto | 52 +- cmd/server/workflow/workflow.swagger.json | 767 ++++++++++---- cmd/server/workflow/workflow_db_service.go | 10 +- cmd/server/workflow/workflow_server.go | 250 ++++- cmd/server/workflow/workflow_service.go | 4 - hack/generate-proto.sh | 132 +-- persist/sqldb/workflow_repository.go | 8 + 13 files changed, 2310 insertions(+), 461 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 69198303a559..47dafcc4281d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -274,7 +274,9 @@ digest = "1:8a7fe65e9ac2612c4df602cc9f014a92406776d993ff0f28335e5a8831d87c53" name = "github.com/gogo/protobuf" packages = [ + "gogoproto", "proto", + "protoc-gen-gogo/descriptor", "sortkeys", ] pruneopts = "" @@ -336,6 +338,22 @@ revision = "ab0dd09aa10e2952b28e12ecd35681b20463ebab" version = "v0.3.1" +[[projects]] + digest = "1:220dccbfc231f373f5ffc0f66bb3d9cd3b1394e38c042a9a458535b57335b05a" + name = "github.com/gophercloud/gophercloud" + packages = [ + ".", + "openstack", + "openstack/identity/v2/tenants", + "openstack/identity/v2/tokens", + "openstack/identity/v3/tokens", + "openstack/utils", + "pagination", + ] + pruneopts = "" + revision = "a8bdb516e71d7c586306501b30b80f06f53c013e" + version = "v0.6.0" + [[projects]] digest = "1:03168f6041f164c06dc6acaaab4ed3ad1c6088b717c365cec892b35c80f4ffc7" name = "github.com/gorilla/websocket" @@ -595,6 +613,14 @@ revision = "839c75faf7f98a33d445d181f3018b5c3409a45e" version = "v1.4.2" +[[projects]] + digest = "1:022a4e2a8c327eb46a99088a51c0dda5d5be86928ace2afd72145dc1d746a323" + name = "github.com/soheilhy/cmux" + packages = ["."] + pruneopts = "" + revision = "e09e9389d85d8492d313d73d1469c029e710623f" + version = "v0.1.4" + [[projects]] digest = "1:9ba49264cef4386aded205f9cb5b1f2d30f983d7dc37a21c780d9db3edfac9a7" name = "github.com/spf13/cobra" @@ -889,6 +915,7 @@ digest = "1:e1505a39ad844b6a89856ffb97363cc47cbee0c511c1423d2d9c673cc4a215a0" name = "google.golang.org/genproto" packages = [ + "googleapis/api/annotations", "googleapis/api/httpbody", "googleapis/rpc/status", "protobuf/field_mask", @@ -1290,6 +1317,7 @@ "plugin/pkg/client/auth/exec", "plugin/pkg/client/auth/gcp", "plugin/pkg/client/auth/oidc", + "plugin/pkg/client/auth/openstack", "rest", "rest/watch", "testing", @@ -1444,14 +1472,19 @@ "github.com/evanphx/json-patch", "github.com/ghodss/yaml", "github.com/go-openapi/spec", + "github.com/gogo/protobuf/gogoproto", "github.com/gogo/protobuf/proto", "github.com/gogo/protobuf/sortkeys", + "github.com/golang/protobuf/proto", "github.com/gorilla/websocket", + "github.com/grpc-ecosystem/grpc-gateway/runtime", + "github.com/grpc-ecosystem/grpc-gateway/utilities", "github.com/mitchellh/go-ps", "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/prometheus/client_golang/prometheus/promhttp", "github.com/sirupsen/logrus", + "github.com/soheilhy/cmux", "github.com/spf13/cobra", "github.com/stretchr/testify/assert", "github.com/stretchr/testify/mock", @@ -1460,7 +1493,12 @@ "github.com/valyala/fasttemplate", "golang.org/x/crypto/ssh", "golang.org/x/net/context", + "google.golang.org/genproto/googleapis/api/annotations", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/grpclog", 
"google.golang.org/grpc/metadata", + "google.golang.org/grpc/status", "gopkg.in/jcmturner/gokrb5.v5/client", "gopkg.in/jcmturner/gokrb5.v5/config", "gopkg.in/jcmturner/gokrb5.v5/credentials", @@ -1498,10 +1536,12 @@ "k8s.io/client-go/plugin/pkg/client/auth/azure", "k8s.io/client-go/plugin/pkg/client/auth/gcp", "k8s.io/client-go/plugin/pkg/client/auth/oidc", + "k8s.io/client-go/plugin/pkg/client/auth/openstack", "k8s.io/client-go/rest", "k8s.io/client-go/testing", "k8s.io/client-go/tools/cache", "k8s.io/client-go/tools/clientcmd", + "k8s.io/client-go/tools/clientcmd/api", "k8s.io/client-go/tools/remotecommand", "k8s.io/client-go/tools/watch", "k8s.io/client-go/util/flowcontrol", diff --git a/cmd/client/client.go b/cmd/client/client.go index ac72eb8c4955..2e0294221145 100644 --- a/cmd/client/client.go +++ b/cmd/client/client.go @@ -60,7 +60,7 @@ func homeDir() string { //} func main(){ //generate() - conn, err := grpc.Dial("localhost:8082", grpc.WithInsecure()) + conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure()) if err != nil { fmt.Println(err) } @@ -85,7 +85,7 @@ func main(){ by,err := json.Marshal(clientConfig) fmt.Println(err) // - md := metadata.Pairs(workflow.CLIENT_REST_CONFIG, string(by), workflow.AUTH_TOKEN, clientConfig.AuthProvider.Config["access-token"]) + md := metadata.Pairs(workflow.CLIENT_REST_CONFIG, string(by)) ctx := metadata.NewOutgoingContext(context.Background(), md) //wq := workflow.WorkflowQuery{} //created, err :=client.Get(ctx,&wq) @@ -93,8 +93,8 @@ func main(){ //fmt.Println("errr",err) // fmt.Println(string(by)) - wq := workflow.WorkflowQuery{Name:"retry-to-completion-d5j29", Namespace:"workflows"} - queried, err := client.Get(ctx, &wq) + wq := workflow.WorkflowListRequest { Namespace:"default"} + queried, err := client.List(ctx, &wq) if err !=nil { fmt.Println("errr",err) } diff --git a/cmd/server/apiserver/argoserver.go b/cmd/server/apiserver/argoserver.go index b64c4ee14eb8..e6dd57def61d 100644 --- a/cmd/server/apiserver/argoserver.go +++ b/cmd/server/apiserver/argoserver.go @@ -1,15 +1,17 @@ package apiserver import ( + "crypto/tls" "github.com/argoproj/argo/cmd/server/workflow" "github.com/argoproj/argo/errors" "github.com/argoproj/argo/pkg/apiclient" "github.com/argoproj/argo/pkg/client/clientset/versioned" + "github.com/argoproj/argo/util/json" "github.com/argoproj/argo/workflow/common" "github.com/argoproj/argo/workflow/config" golang_proto "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/prometheus/common/log" + log "github.com/sirupsen/logrus" "golang.org/x/net/context" "google.golang.org/grpc" apiv1 "k8s.io/api/core/v1" @@ -20,6 +22,7 @@ import ( "sigs.k8s.io/yaml" "fmt" + "github.com/soheilhy/cmux" "k8s.io/client-go/kubernetes" "net/http" "time" @@ -27,25 +30,27 @@ import ( type ArgoServer struct { Namespace string - KubeClientset kubernetes.Clientset + KubeClientset *kubernetes.Clientset WfClientSet *versioned.Clientset EnableClientAuth bool - Config *config.WorkflowControllerConfig - ConfigName string + Config *config.WorkflowControllerConfig + ConfigName string + stopCh chan struct{} } type ArgoServerOpts struct { Insecure bool Namespace string - KubeClientset *versioned.Clientset + KubeClientset *kubernetes.Clientset + WfClientSet *versioned.Clientset EnableClientAuth bool - ConfigName string + ConfigName string } func NewArgoServer(ctx context.Context, opts ArgoServerOpts) *ArgoServer { - return &ArgoServer{Namespace: opts.Namespace, WfClientSet: opts.KubeClientset, - EnableClientAuth: 
opts.EnableClientAuth, ConfigName:opts.ConfigName} + return &ArgoServer{Namespace: opts.Namespace, WfClientSet: opts.WfClientSet, KubeClientset: opts.KubeClientset, + EnableClientAuth: opts.EnableClientAuth, ConfigName: opts.ConfigName} } var backoff = wait.Backoff{ @@ -55,15 +60,82 @@ var backoff = wait.Backoff{ Jitter: 0.1, } +func (as *ArgoServer) useTLS() bool { + + return false +} + func (as *ArgoServer) Run(ctx context.Context, port int) { grpcs := as.newGRPCServer() - //grpcWebS := grpcweb.WrapServer(grpcs) + var httpS *http.Server + var httpsS *http.Server + if as.useTLS() { + httpS = newRedirectServer(port) + httpsS = as.newHTTPServer(ctx, port) + } else { + httpS = as.newHTTPServer(ctx, port) + } - lis, err := net.Listen("tcp", fmt.Sprintf(":%d", 8082)) - if err != nil { - log.Fatalf("failed to listen: %v", err) + // Start listener + var conn net.Listener + var realErr error + _ = wait.ExponentialBackoff(backoff, func() (bool, error) { + conn, realErr = net.Listen("tcp", fmt.Sprintf("localhost:%d", port)) + if realErr != nil { + log.Warnf("failed listen: %v", realErr) + return false, nil + } + return true, nil + }) + + // Cmux is used to support servicing gRPC and HTTP1.1+JSON on the same port + tcpm := cmux.New(conn) + var tlsm cmux.CMux + var grpcL net.Listener + var httpL net.Listener + var httpsL net.Listener + if !as.useTLS() { + httpL = tcpm.Match(cmux.HTTP1Fast()) + grpcL = tcpm.Match(cmux.Any()) + } else { + // We first match on HTTP 1.1 methods. + //httpL = tcpm.Match(cmux.HTTP1Fast()) + + // If not matched, we assume that its TLS. + tlsl := tcpm.Match(cmux.Any()) + tlsConfig := tls.Config{ + //Certificates: []tls.Certificate{*as.settings.Certificate}, + } + //if as.TLSConfigCustomizer != nil { + // as.TLSConfigCustomizer(&tlsConfig) + //} + + tlsl = tls.NewListener(tlsl, &tlsConfig) + + // Now, we build another mux recursively to match HTTPS and gRPC. + tlsm = cmux.New(tlsl) + httpsL = tlsm.Match(cmux.HTTP1Fast()) + grpcL = tlsm.Match(cmux.Any()) + } + //lis, err := net.Listen("tcp", fmt.Sprintf(":%d", 8083)) + //if err != nil { + // log.Fatalf("failed to listen: %v", err) + //} + //lis1, err := net.Listen("tcp", fmt.Sprintf(":%d", 8082)) + //if err != nil { + // log.Fatalf("failed to listen: %v", err) + //} + go func() { as.checkServeErr("grpcS", grpcs.Serve(grpcL)) }() + go func() { as.checkServeErr("httpS", httpS.Serve(httpL)) }() + if as.useTLS() { + go func() { as.checkServeErr("httpsS", httpsS.Serve(httpsL)) }() + go func() { as.checkServeErr("tlsm", tlsm.Serve()) }() } - grpcs.Serve(lis) + go func() { as.checkServeErr("tcpm", tcpm.Serve()) }() + + as.stopCh = make(chan struct{}) + <-as.stopCh + } func (as *ArgoServer) newGRPCServer() *grpc.Server { @@ -77,49 +149,47 @@ func (as *ArgoServer) newGRPCServer() *grpc.Server { } grpcS := grpc.NewServer(sOpts...) - configMap, err := as.RsyncConfig(as.Namespace, as.WfClientSet, &as.KubeClientset) + configMap, err := as.RsyncConfig(as.Namespace, as.WfClientSet, as.KubeClientset) if err != nil { - panic("Error marshalling config map") + //panic("Error marshalling config map") } - workflowServer := workflow.NewWorkflowServer(as.Namespace, as.WfClientSet, &as.KubeClientset, configMap, as.EnableClientAuth) + workflowServer := workflow.NewWorkflowServer(as.Namespace, as.WfClientSet, as.KubeClientset, configMap, as.EnableClientAuth) workflow.RegisterWorkflowServiceServer(grpcS, workflowServer) + return grpcS } -//// newHTTPServer returns the HTTP server to serve HTTP/HTTPS requests. 
This is implemented -//// using grpc-gateway as a proxy to the gRPC server. -//func (a *ArgoServer) newHTTPServer(ctx context.Context, port int, grpcWebHandler http.Handler) *http.KubeService { -// endpoint := fmt.Sprintf("localhost:%d", port) -// mux := http.NewServeMux() -// httpS := http.KubeService{ -// Addr: endpoint, -// Handler: &handlerSwitcher{ -// handler: &bug21955Workaround{handler: mux}, -// contentTypeToHandler: map[string]http.Handler{ -// "application/grpc-web+proto": grpcWebHandler, -// }, -// }, -// } -// var dOpts []grpc.DialOption -// dOpts = append(dOpts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(apiclient.MaxGRPCMessageSize))) -// //dOpts = append(dOpts, grpc.WithUserAgent(fmt.Sprintf("%s/%s", common.ArgoCDUserAgentName, argocd.GetVersion().Version))) -// -// dOpts = append(dOpts, grpc.WithInsecure()) -// -// // HTTP 1.1+JSON Server -// // grpc-ecosystem/grpc-gateway is used to proxy HTTP requests to the corresponding gRPC call -// // NOTE: if a marshaller option is not supplied, grpc-gateway will default to the jsonpb from -// // golang/protobuf. Which does not support types such as time.Time. gogo/protobuf does support -// // time.Time, but does not support custom UnmarshalJSON() and MarshalJSON() methods. Therefore -// //// we use our own Marshaler -// gwMuxOpts := runtime.WithMarshalerOption(runtime.MIMEWildcard, new(jsonutil.JSONMarshaler)) -// gwCookieOpts := runtime.WithForwardResponseOption(a.translateGrpcCookieHeader) -// gwmux := runtime.NewServeMux(gwMuxOpts, gwCookieOpts) -// mux.Handle("/api/", gwmux) -// mustRegisterGWHandler(workflow.RegisterWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dOpts) -// -// return &httpS -//} +// newHTTPServer returns the HTTP server to serve HTTP/HTTPS requests. This is implemented +// using grpc-gateway as a proxy to the gRPC server. +func (a *ArgoServer) newHTTPServer(ctx context.Context, port int) *http.Server { + endpoint := fmt.Sprintf("localhost:%d", port) + + mux := http.NewServeMux() + httpS := http.Server{ + Addr: endpoint, + Handler: mux, + } + var dOpts []grpc.DialOption + dOpts = append(dOpts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(apiclient.MaxGRPCMessageSize))) + //dOpts = append(dOpts, grpc.WithUserAgent(fmt.Sprintf("%s/%s", common.ArgoCDUserAgentName, argocd.GetVersion().Version))) + + dOpts = append(dOpts, grpc.WithInsecure()) + + // HTTP 1.1+JSON Server + // grpc-ecosystem/grpc-gateway is used to proxy HTTP requests to the corresponding gRPC call + // NOTE: if a marshaller option is not supplied, grpc-gateway will default to the jsonpb from + // golang/protobuf. Which does not support types such as time.Time. gogo/protobuf does support + // time.Time, but does not support custom UnmarshalJSON() and MarshalJSON() methods. 
Therefore + //// we use our own Marshaler + gwMuxOpts := runtime.WithMarshalerOption(runtime.MIMEWildcard, new(json.JSONMarshaler)) + gwCookieOpts := runtime.WithForwardResponseOption(a.translateGrpcCookieHeader) + gwmux := runtime.NewServeMux(gwMuxOpts, gwCookieOpts) + mustRegisterGWHandler(workflow.RegisterWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dOpts) + + mux.Handle("/api/", gwmux) + return &httpS +} + type registerFunc func(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) error // mustRegisterGWHandler is a convenience function to register a gateway handler @@ -149,10 +219,7 @@ type bug21955Workaround struct { } var pathPatters = []*regexp.Regexp{ - regexp.MustCompile(`/api/v1/clusters/[^/]+`), - regexp.MustCompile(`/api/v1/repositories/[^/]+`), - regexp.MustCompile(`/api/v1/repositories/[^/]+/apps`), - regexp.MustCompile(`/api/v1/repositories/[^/]+/apps/[^/]+`), + regexp.MustCompile(`/api/v1/workflows/[^/]+`), } func (bf *bug21955Workaround) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -190,16 +257,16 @@ func (a *ArgoServer) translateGrpcCookieHeader(ctx context.Context, w http.Respo } // ResyncConfig reloads the controller config from the configmap -func (a *ArgoServer) RsyncConfig(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset)(*config.WorkflowControllerConfig, error){ - cmClient := kubeClientSet.CoreV1().ConfigMaps(namespace) - cm, err := cmClient.Get(a.ConfigName, metav1.GetOptions{}) - if err != nil { - return nil, errors.InternalWrapError(err) - } - return a.UpdateConfig(cm) +func (a *ArgoServer) RsyncConfig(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset) (*config.WorkflowControllerConfig, error) { + cmClient := kubeClientSet.CoreV1().ConfigMaps(namespace) + cm, err := cmClient.Get("workflow-controller-configmap", metav1.GetOptions{}) + if err != nil { + return nil, errors.InternalWrapError(err) + } + return a.UpdateConfig(cm) } -func (a *ArgoServer) UpdateConfig(cm *apiv1.ConfigMap)(*config.WorkflowControllerConfig, error){ +func (a *ArgoServer) UpdateConfig(cm *apiv1.ConfigMap) (*config.WorkflowControllerConfig, error) { configStr, ok := cm.Data[common.WorkflowControllerConfigMapKey] if !ok { return nil, errors.InternalErrorf("ConfigMap '%s' does not have key '%s'", a.ConfigName, common.WorkflowControllerConfigMapKey) @@ -212,3 +279,16 @@ func (a *ArgoServer) UpdateConfig(cm *apiv1.ConfigMap)(*config.WorkflowControlle return &config, nil } +// checkServeErr checks the error from a .Serve() call to decide if it was a graceful shutdown +func (a *ArgoServer) checkServeErr(name string, err error) { + if err != nil { + if a.stopCh == nil { + // a nil stopCh indicates a graceful shutdown + log.Infof("graceful shutdown %s: %v", name, err) + } else { + log.Fatalf("%s: %v", name, err) + } + } else { + log.Infof("graceful shutdown %s", name) + } +} diff --git a/cmd/server/main.go b/cmd/server/main.go index 9442e7d91cda..b665f4d1a60b 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -1,4 +1,4 @@ -package server +package main import ( "fmt" @@ -10,6 +10,7 @@ import ( "github.com/argoproj/pkg/stats" "github.com/spf13/cobra" "golang.org/x/net/context" + "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth/azure" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" @@ -32,6 +33,7 @@ func NewRootCommand() *cobra.Command { logLevel string // --loglevel enableClientAuth string 
configMap string + port int ) var command = cobra.Command{ @@ -55,6 +57,8 @@ func NewRootCommand() *cobra.Command { return err } + kubeConfig := kubernetes.NewForConfigOrDie(config) + wflientset := wfclientset.NewForConfigOrDie(config) if err != nil { @@ -63,10 +67,10 @@ func NewRootCommand() *cobra.Command { ctx, cancel := context.WithCancel(context.Background()) var clientAuth bool clientAuth, err =strconv.ParseBool( enableClientAuth) - var opts = apiserver.ArgoServerOpts{Namespace: namespace, KubeClientset: wflientset, EnableClientAuth: clientAuth} + var opts = apiserver.ArgoServerOpts{Namespace: namespace, WfClientSet: wflientset,KubeClientset: kubeConfig, EnableClientAuth: clientAuth} argoSvr := apiserver.NewArgoServer(ctx, opts ) defer cancel() - go argoSvr.Run(ctx,8082) + go argoSvr.Run(ctx,port) // Wait forever select {} @@ -76,6 +80,7 @@ func NewRootCommand() *cobra.Command { clientConfig = kubecli.AddKubectlFlagsToCmd(&command) command.AddCommand(cmdutil.NewVersionCmd(CLIName)) + command.Flags().IntVar(&port, "port", 8080, "") command.Flags().StringVar(&enableClientAuth, "enableClientAuth", "false", "") command.Flags().StringVar(&configMap, "configmap", "workflow-controller-configmap", "Name of K8s configmap to retrieve workflow controller configuration") command.Flags().StringVar(&logLevel, "loglevel", "debug", "Set the logging level. One of: debug|info|warn|error") diff --git a/cmd/server/workflow/workflow.pb.go b/cmd/server/workflow/workflow.pb.go index da6cade80623..e36ce8dad6d9 100644 --- a/cmd/server/workflow/workflow.pb.go +++ b/cmd/server/workflow/workflow.pb.go @@ -18,6 +18,7 @@ import ( codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" io "io" + v11 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" math "math" math_bits "math/bits" @@ -34,10 +35,122 @@ var _ = math.Inf // proto package needs to be updated. 
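// (If the installed copy of github.com/gogo/protobuf is older than the
// generator that produced this file, the constant referenced below does
// not exist, so the version mismatch surfaces as a compile error rather
// than a runtime failure.)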
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +type SubmitOptions struct { + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + GenerateName string `protobuf:"bytes,2,opt,name=GenerateName,proto3" json:"GenerateName,omitempty"` + InstanceID string `protobuf:"bytes,3,opt,name=InstanceID,proto3" json:"InstanceID,omitempty"` + Entrypoint string `protobuf:"bytes,4,opt,name=Entrypoint,proto3" json:"Entrypoint,omitempty"` + Parameters []string `protobuf:"bytes,5,rep,name=Parameters,proto3" json:"Parameters,omitempty"` + ServiceAccount string `protobuf:"bytes,6,opt,name=ServiceAccount,proto3" json:"ServiceAccount,omitempty"` + ServerDryRun bool `protobuf:"varint,7,opt,name=ServerDryRun,proto3" json:"ServerDryRun,omitempty"` + Labels string `protobuf:"bytes,8,opt,name=Labels,proto3" json:"Labels,omitempty"` + OwnerReference *v1.OwnerReference `protobuf:"bytes,9,opt,name=OwnerReference,proto3" json:"OwnerReference,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SubmitOptions) Reset() { *m = SubmitOptions{} } +func (m *SubmitOptions) String() string { return proto.CompactTextString(m) } +func (*SubmitOptions) ProtoMessage() {} +func (*SubmitOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_192bc67c39cca05a, []int{0} +} +func (m *SubmitOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubmitOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SubmitOptions.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SubmitOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubmitOptions.Merge(m, src) +} +func (m *SubmitOptions) XXX_Size() int { + return m.Size() +} +func (m *SubmitOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SubmitOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SubmitOptions proto.InternalMessageInfo + +func (m *SubmitOptions) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SubmitOptions) GetGenerateName() string { + if m != nil { + return m.GenerateName + } + return "" +} + +func (m *SubmitOptions) GetInstanceID() string { + if m != nil { + return m.InstanceID + } + return "" +} + +func (m *SubmitOptions) GetEntrypoint() string { + if m != nil { + return m.Entrypoint + } + return "" +} + +func (m *SubmitOptions) GetParameters() []string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *SubmitOptions) GetServiceAccount() string { + if m != nil { + return m.ServiceAccount + } + return "" +} + +func (m *SubmitOptions) GetServerDryRun() bool { + if m != nil { + return m.ServerDryRun + } + return false +} + +func (m *SubmitOptions) GetLabels() string { + if m != nil { + return m.Labels + } + return "" +} + +func (m *SubmitOptions) GetOwnerReference() *v1.OwnerReference { + if m != nil { + return m.OwnerReference + } + return nil +} + type WorkflowCreateRequest struct { Namespace string `protobuf:"bytes,1,opt,name=Namespace,proto3" json:"Namespace,omitempty"` - Workflows *v1alpha1.Workflow `protobuf:"bytes,2,opt,name=Workflows,proto3" json:"Workflows,omitempty"` + Workflow *v1alpha1.Workflow `protobuf:"bytes,2,opt,name=Workflow,proto3" json:"Workflow,omitempty"` CreateOptions *v1.CreateOptions `protobuf:"bytes,3,opt,name=CreateOptions,proto3" 
json:"CreateOptions,omitempty"` + SubmitOptions *SubmitOptions `protobuf:"bytes,4,opt,name=SubmitOptions,proto3" json:"SubmitOptions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -47,7 +160,7 @@ func (m *WorkflowCreateRequest) Reset() { *m = WorkflowCreateRequest{} } func (m *WorkflowCreateRequest) String() string { return proto.CompactTextString(m) } func (*WorkflowCreateRequest) ProtoMessage() {} func (*WorkflowCreateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{0} + return fileDescriptor_192bc67c39cca05a, []int{1} } func (m *WorkflowCreateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -83,9 +196,9 @@ func (m *WorkflowCreateRequest) GetNamespace() string { return "" } -func (m *WorkflowCreateRequest) GetWorkflows() *v1alpha1.Workflow { +func (m *WorkflowCreateRequest) GetWorkflow() *v1alpha1.Workflow { if m != nil { - return m.Workflows + return m.Workflow } return nil } @@ -97,6 +210,13 @@ func (m *WorkflowCreateRequest) GetCreateOptions() *v1.CreateOptions { return nil } +func (m *WorkflowCreateRequest) GetSubmitOptions() *SubmitOptions { + if m != nil { + return m.SubmitOptions + } + return nil +} + type WorkflowGetRequest struct { WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` @@ -110,7 +230,7 @@ func (m *WorkflowGetRequest) Reset() { *m = WorkflowGetRequest{} } func (m *WorkflowGetRequest) String() string { return proto.CompactTextString(m) } func (*WorkflowGetRequest) ProtoMessage() {} func (*WorkflowGetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{1} + return fileDescriptor_192bc67c39cca05a, []int{2} } func (m *WorkflowGetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -172,7 +292,7 @@ func (m *WorkflowListRequest) Reset() { *m = WorkflowListRequest{} } func (m *WorkflowListRequest) String() string { return proto.CompactTextString(m) } func (*WorkflowListRequest) ProtoMessage() {} func (*WorkflowListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{2} + return fileDescriptor_192bc67c39cca05a, []int{3} } func (m *WorkflowListRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -228,7 +348,7 @@ func (m *WorkflowUpdateRequest) Reset() { *m = WorkflowUpdateRequest{} } func (m *WorkflowUpdateRequest) String() string { return proto.CompactTextString(m) } func (*WorkflowUpdateRequest) ProtoMessage() {} func (*WorkflowUpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{3} + return fileDescriptor_192bc67c39cca05a, []int{4} } func (m *WorkflowUpdateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -279,20 +399,21 @@ func (m *WorkflowUpdateRequest) GetMemoized() bool { } type WorkflowLogRequest struct { - WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` - PodName string `protobuf:"bytes,3,opt,name=PodName,proto3" json:"PodName,omitempty"` - Container string `protobuf:"bytes,4,opt,name=Container,proto3" json:"Container,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + WorkflowName string 
`protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` + PodName string `protobuf:"bytes,3,opt,name=PodName,proto3" json:"PodName,omitempty"` + Container string `protobuf:"bytes,4,opt,name=Container,proto3" json:"Container,omitempty"` + LogOptions *v11.PodLogOptions `protobuf:"bytes,5,opt,name=logOptions,proto3" json:"logOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *WorkflowLogRequest) Reset() { *m = WorkflowLogRequest{} } func (m *WorkflowLogRequest) String() string { return proto.CompactTextString(m) } func (*WorkflowLogRequest) ProtoMessage() {} func (*WorkflowLogRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{4} + return fileDescriptor_192bc67c39cca05a, []int{5} } func (m *WorkflowLogRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -349,6 +470,13 @@ func (m *WorkflowLogRequest) GetContainer() string { return "" } +func (m *WorkflowLogRequest) GetLogOptions() *v11.PodLogOptions { + if m != nil { + return m.LogOptions + } + return nil +} + type WorkflowDeleteRequest struct { WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` @@ -362,7 +490,7 @@ func (m *WorkflowDeleteRequest) Reset() { *m = WorkflowDeleteRequest{} } func (m *WorkflowDeleteRequest) String() string { return proto.CompactTextString(m) } func (*WorkflowDeleteRequest) ProtoMessage() {} func (*WorkflowDeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{5} + return fileDescriptor_192bc67c39cca05a, []int{6} } func (m *WorkflowDeleteRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,6 +541,8 @@ func (m *WorkflowDeleteRequest) GetDeleteOptions() *v1.DeleteOptions { } type WorkflowDeleteResponse struct { + WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` + Status string `protobuf:"bytes,2,opt,name=Status,proto3" json:"Status,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -422,7 +552,7 @@ func (m *WorkflowDeleteResponse) Reset() { *m = WorkflowDeleteResponse{} func (m *WorkflowDeleteResponse) String() string { return proto.CompactTextString(m) } func (*WorkflowDeleteResponse) ProtoMessage() {} func (*WorkflowDeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{6} + return fileDescriptor_192bc67c39cca05a, []int{7} } func (m *WorkflowDeleteResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -451,6 +581,20 @@ func (m *WorkflowDeleteResponse) XXX_DiscardUnknown() { var xxx_messageInfo_WorkflowDeleteResponse proto.InternalMessageInfo +func (m *WorkflowDeleteResponse) GetWorkflowName() string { + if m != nil { + return m.WorkflowName + } + return "" +} + +func (m *WorkflowDeleteResponse) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + type LogEntry struct { Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` TimeStamp *v1.Time `protobuf:"bytes,2,opt,name=timeStamp,proto3" json:"timeStamp,omitempty"` @@ -463,7 +607,7 @@ func (m *LogEntry) Reset() { *m = LogEntry{} } func (m *LogEntry) String() string { return 
proto.CompactTextString(m) } func (*LogEntry) ProtoMessage() {} func (*LogEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{7} + return fileDescriptor_192bc67c39cca05a, []int{8} } func (m *LogEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -507,6 +651,7 @@ func (m *LogEntry) GetTimeStamp() *v1.Time { } func init() { + proto.RegisterType((*SubmitOptions)(nil), "workflow.SubmitOptions") proto.RegisterType((*WorkflowCreateRequest)(nil), "workflow.WorkflowCreateRequest") proto.RegisterType((*WorkflowGetRequest)(nil), "workflow.WorkflowGetRequest") proto.RegisterType((*WorkflowListRequest)(nil), "workflow.WorkflowListRequest") @@ -520,57 +665,77 @@ func init() { func init() { proto.RegisterFile("cmd/server/workflow/workflow.proto", fileDescriptor_192bc67c39cca05a) } var fileDescriptor_192bc67c39cca05a = []byte{ - // 796 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x96, 0xcf, 0x6e, 0xd3, 0x4e, - 0x10, 0xc7, 0xb5, 0x6d, 0x7f, 0x69, 0xb2, 0x6d, 0xf5, 0x13, 0x0b, 0x54, 0x91, 0x95, 0xfe, 0x91, - 0x25, 0xa4, 0xaa, 0xaa, 0xec, 0xa6, 0xad, 0x04, 0xaa, 0x84, 0x50, 0x29, 0x28, 0x1c, 0x02, 0x54, - 0x4e, 0x11, 0x2a, 0x9c, 0xdc, 0x64, 0x70, 0x4d, 0x62, 0xaf, 0xf1, 0x6e, 0x52, 0x95, 0xaa, 0x07, - 0x38, 0x54, 0x5c, 0x80, 0x03, 0x17, 0x8e, 0xfc, 0x3b, 0xf0, 0x00, 0x3c, 0x04, 0x47, 0x24, 0x5e, - 0x00, 0x55, 0x5c, 0x79, 0x07, 0xb4, 0x6b, 0x3b, 0xb6, 0x9b, 0xb4, 0x4a, 0x49, 0x0f, 0xb9, 0xed, - 0xce, 0x7a, 0x66, 0x3e, 0xfe, 0xee, 0xec, 0xec, 0x62, 0xb5, 0xea, 0xd4, 0x74, 0x06, 0x7e, 0x0b, - 0x7c, 0x7d, 0x97, 0xfa, 0xf5, 0x27, 0x0d, 0xba, 0xdb, 0x1e, 0x68, 0x9e, 0x4f, 0x39, 0x25, 0xd9, - 0x68, 0xae, 0x5c, 0xb2, 0xa8, 0x45, 0xa5, 0x51, 0x17, 0xa3, 0x60, 0x5d, 0x29, 0x58, 0x94, 0x5a, - 0x0d, 0xd0, 0x4d, 0xcf, 0xd6, 0x4d, 0xd7, 0xa5, 0xdc, 0xe4, 0x36, 0x75, 0x59, 0xb8, 0xba, 0x52, - 0xbf, 0xc6, 0x34, 0x9b, 0x8a, 0x55, 0xc7, 0xac, 0xee, 0xd8, 0x2e, 0xf8, 0x7b, 0xba, 0x57, 0xb7, - 0x84, 0x81, 0xe9, 0x0e, 0x70, 0x53, 0x6f, 0x15, 0x75, 0x0b, 0x5c, 0xf0, 0x4d, 0x0e, 0xb5, 0xd0, - 0x6b, 0xdd, 0xb2, 0xf9, 0x4e, 0x73, 0x5b, 0xab, 0x52, 0x47, 0x37, 0x7d, 0x99, 0xf4, 0xa9, 0x1c, - 0xc4, 0xae, 0x6d, 0xdc, 0x56, 0xd1, 0x6c, 0x78, 0x3b, 0x66, 0x47, 0x10, 0xf5, 0x0f, 0xc2, 0x97, - 0x1f, 0x86, 0x5f, 0xad, 0xfb, 0x60, 0x72, 0x30, 0xe0, 0x59, 0x13, 0x18, 0x27, 0x05, 0x9c, 0xbb, - 0x67, 0x3a, 0xc0, 0x3c, 0xb3, 0x0a, 0x79, 0x34, 0x8b, 0xe6, 0x72, 0x46, 0x6c, 0x20, 0x8f, 0x71, - 0x2e, 0x72, 0x63, 0xf9, 0xa1, 0x59, 0x34, 0x37, 0xb6, 0x74, 0x5d, 0x8b, 0x81, 0xb4, 0x08, 0x48, - 0x0e, 0x34, 0xaf, 0x6e, 0x69, 0x02, 0x48, 0x6b, 0xcb, 0x16, 0x01, 0x69, 0x51, 0x14, 0x23, 0x8e, - 0x47, 0xb6, 0xf0, 0x44, 0xc0, 0x72, 0xdf, 0x93, 0x32, 0xe5, 0x87, 0x65, 0x82, 0x65, 0x2d, 0xd0, - 0x49, 0x4b, 0xea, 0x14, 0xc7, 0x16, 0x3a, 0x69, 0xad, 0xa2, 0x96, 0x72, 0x35, 0xd2, 0x91, 0xd4, - 0xaf, 0x08, 0x93, 0x28, 0x51, 0x09, 0x78, 0xf4, 0xb3, 0x2a, 0x1e, 0x8f, 0xac, 0xe2, 0x1f, 0xc3, - 0xff, 0x4d, 0xd9, 0xd2, 0x82, 0x0c, 0x1d, 0x17, 0x64, 0x03, 0xe3, 0x12, 0xf0, 0x34, 0xf0, 0x62, - 0x6f, 0xc0, 0xb1, 0x9f, 0x91, 0x88, 0xa1, 0xbe, 0x42, 0xf8, 0x62, 0x04, 0x50, 0xb6, 0x19, 0xef, - 0x6d, 0x63, 0x2a, 0x78, 0x4c, 0x7c, 0x1c, 0x81, 0x04, 0x5b, 0x53, 0xec, 0x0d, 0x24, 0xe1, 0x68, - 0x24, 0xa3, 0xa8, 0xcd, 0xb8, 0x48, 0x1e, 0x78, 0xb5, 0x44, 0x91, 0xf4, 0xaf, 0x9b, 0x82, 0xb3, - 0x77, 0xc1, 0xa1, 0xf6, 0x73, 0xa8, 0x49, 0xd5, 0xb2, 0x46, 0x7b, 0xae, 0xbe, 0x49, 0x6c, 0x56, - 0x99, 0x5a, 0xe7, 0x97, 0x34, 0x8f, 0x47, 0x37, 0x68, 0x4d, 0x3a, 0x0f, 0xcb, 0xb5, 
0x68, 0x2a, - 0xfc, 0xd6, 0xa9, 0xcb, 0x4d, 0xa1, 0x50, 0x7e, 0x24, 0xf0, 0x6b, 0x1b, 0xd4, 0x6f, 0x89, 0xd3, - 0x72, 0x0b, 0x1a, 0x70, 0x9e, 0x42, 0x6c, 0xe1, 0x89, 0x20, 0xe4, 0x3f, 0x15, 0x7d, 0xca, 0xd5, - 0x48, 0x47, 0x52, 0xf3, 0x78, 0xf2, 0x38, 0x35, 0xf3, 0xa8, 0xcb, 0x40, 0x75, 0x71, 0xb6, 0x4c, - 0xad, 0xdb, 0x2e, 0xf7, 0xf7, 0x84, 0x28, 0x55, 0xea, 0x72, 0x70, 0x79, 0x48, 0x1f, 0x4d, 0xc9, - 0x1d, 0x9c, 0xe3, 0xb6, 0x03, 0x15, 0x6e, 0x3a, 0x5e, 0x58, 0x51, 0xf3, 0xbd, 0x61, 0x6d, 0xda, - 0x0e, 0x18, 0xb1, 0xf3, 0xd2, 0xeb, 0x71, 0xfc, 0x7f, 0x84, 0x52, 0x01, 0xbf, 0x65, 0x57, 0x81, - 0x1c, 0x22, 0x9c, 0x09, 0x0e, 0x29, 0x99, 0x89, 0x1b, 0x44, 0xd7, 0xa6, 0xa4, 0xf4, 0xd7, 0x63, - 0xd4, 0xc2, 0xcb, 0x9f, 0xbf, 0xdf, 0x0d, 0x4d, 0xaa, 0x17, 0x64, 0x23, 0x6e, 0x15, 0xdb, 0x0d, - 0x92, 0xad, 0xa2, 0x79, 0xf2, 0x1e, 0xe1, 0xe1, 0x12, 0x70, 0x52, 0xe8, 0xa4, 0x88, 0x5b, 0x45, - 0xbf, 0x08, 0x2b, 0x12, 0x41, 0x23, 0x0b, 0x1d, 0x08, 0xfa, 0x7e, 0xbb, 0x18, 0x0e, 0xf4, 0xfd, - 0x64, 0xe5, 0x1c, 0x90, 0xb7, 0x08, 0x8f, 0x88, 0x03, 0x49, 0xa6, 0x3a, 0xd9, 0x12, 0xbd, 0x41, - 0x59, 0xeb, 0x0b, 0x4e, 0x44, 0x52, 0xaf, 0x48, 0xc0, 0x19, 0x32, 0x75, 0x2a, 0x20, 0x79, 0x81, - 0x70, 0x26, 0x28, 0xa6, 0x6e, 0xbb, 0x96, 0x3a, 0x1c, 0xca, 0xec, 0xc9, 0x1f, 0x84, 0x75, 0x18, - 0xaa, 0x32, 0x7f, 0x36, 0x55, 0x3e, 0x20, 0xfc, 0x9f, 0x01, 0xa2, 0x76, 0xbb, 0x20, 0xa4, 0x1a, - 0x55, 0xbf, 0xbb, 0x76, 0x55, 0xf2, 0x15, 0x95, 0x33, 0xf1, 0x89, 0x9a, 0xfa, 0x8c, 0x70, 0xd6, - 0x00, 0xd6, 0xdc, 0x76, 0x6c, 0x3e, 0xb8, 0x94, 0x1f, 0x11, 0xce, 0x08, 0x4a, 0x07, 0x06, 0x97, - 0xf1, 0x13, 0xc2, 0xa3, 0x95, 0x26, 0xf3, 0xc0, 0xad, 0x0d, 0x2e, 0xe4, 0x17, 0x84, 0x73, 0x9b, - 0xe0, 0x3b, 0xb6, 0x7b, 0x42, 0x3b, 0x1b, 0x0c, 0xcc, 0x43, 0x24, 0x2f, 0xc0, 0x32, 0xb5, 0x58, - 0xb7, 0x6e, 0x17, 0xdf, 0xb5, 0x0a, 0x89, 0x57, 0xa3, 0x8b, 0x42, 0x2d, 0xc9, 0xb4, 0x6b, 0xe4, - 0xc6, 0xf1, 0xb4, 0xa7, 0x64, 0xd5, 0x3d, 0x5a, 0x63, 0xfa, 0x7e, 0x78, 0xcd, 0x1e, 0xe8, 0x0d, - 0x6a, 0xb1, 0x45, 0x74, 0x73, 0xf5, 0xfb, 0xd1, 0x34, 0xfa, 0x71, 0x34, 0x8d, 0x7e, 0x1d, 0x4d, - 0xa3, 0x47, 0x0b, 0x27, 0xbe, 0x68, 0xbb, 0x3c, 0xc1, 0xb7, 0x33, 0xf2, 0x05, 0xbb, 0xfc, 0x37, - 0x00, 0x00, 0xff, 0xff, 0xa9, 0xf3, 0x79, 0xef, 0xa0, 0x0b, 0x00, 0x00, + // 1111 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x97, 0xcf, 0x6e, 0xe4, 0xc4, + 0x13, 0xc7, 0xd5, 0xf9, 0x33, 0x99, 0x74, 0x36, 0xf9, 0xe9, 0xd7, 0x40, 0x18, 0x99, 0x6c, 0x36, + 0x6b, 0x09, 0x14, 0x45, 0x2b, 0x3b, 0x99, 0x64, 0x61, 0xc9, 0x2a, 0x2c, 0x21, 0x41, 0x61, 0xa5, + 0x81, 0x8d, 0x3c, 0x41, 0xab, 0x20, 0x2e, 0x1d, 0x4f, 0xad, 0x63, 0x32, 0xee, 0x36, 0xee, 0x9e, + 0x89, 0x42, 0x94, 0x03, 0x1c, 0x56, 0x70, 0xe2, 0xc0, 0x85, 0x3b, 0x12, 0x5a, 0xad, 0x10, 0x27, + 0x1e, 0x82, 0x23, 0x88, 0x17, 0x40, 0x11, 0x12, 0xe2, 0x2d, 0x50, 0xb7, 0xed, 0xb1, 0x3d, 0x99, + 0xac, 0x66, 0x48, 0xe6, 0xe6, 0xae, 0x72, 0x55, 0x7f, 0xfc, 0xad, 0x72, 0xb5, 0x8d, 0x4d, 0x37, + 0x68, 0xd8, 0x02, 0xa2, 0x36, 0x44, 0xf6, 0x31, 0x8f, 0x8e, 0x9e, 0x34, 0xf9, 0x71, 0xe7, 0xc2, + 0x0a, 0x23, 0x2e, 0x39, 0x29, 0xa7, 0x6b, 0xe3, 0x65, 0x8f, 0x7b, 0x5c, 0x1b, 0x6d, 0x75, 0x15, + 0xfb, 0x8d, 0x39, 0x8f, 0x73, 0xaf, 0x09, 0x36, 0x0d, 0x7d, 0x9b, 0x32, 0xc6, 0x25, 0x95, 0x3e, + 0x67, 0x22, 0xf1, 0xae, 0x1d, 0xdd, 0x13, 0x96, 0xcf, 0x95, 0x37, 0xa0, 0xee, 0xa1, 0xcf, 0x20, + 0x3a, 0xb1, 0xc3, 0x23, 0x4f, 0x19, 0x84, 0x1d, 0x80, 0xa4, 0x76, 0x7b, 0xc5, 0xf6, 0x80, 0x41, + 0x44, 0x25, 0x34, 0x92, 0xa8, 0x2d, 0xcf, 0x97, 0x87, 0xad, 0x03, 0xcb, 0xe5, 0x81, 
0x4d, 0x23, + 0xbd, 0xe9, 0x67, 0xfa, 0x22, 0x0b, 0xed, 0xe0, 0xb6, 0x57, 0x68, 0x33, 0x3c, 0xa4, 0x17, 0x93, + 0x98, 0xd9, 0xd6, 0xb6, 0xcb, 0x23, 0xe8, 0xb1, 0x91, 0xf9, 0xcf, 0x08, 0x9e, 0xae, 0xb7, 0x0e, + 0x02, 0x5f, 0x3e, 0x0a, 0x35, 0x36, 0x21, 0x78, 0xec, 0x23, 0x1a, 0x40, 0x05, 0x2d, 0xa0, 0xc5, + 0x49, 0x47, 0x5f, 0x13, 0x13, 0xdf, 0xd8, 0x49, 0x02, 0xb5, 0x6f, 0x44, 0xfb, 0x0a, 0x36, 0x32, + 0x8f, 0xf1, 0x43, 0x26, 0x24, 0x65, 0x2e, 0x3c, 0xdc, 0xae, 0x8c, 0xea, 0x3b, 0x72, 0x16, 0xe5, + 0x7f, 0x9f, 0xc9, 0xe8, 0x24, 0xe4, 0x3e, 0x93, 0x95, 0xb1, 0xd8, 0x9f, 0x59, 0x94, 0x7f, 0x97, + 0x46, 0x34, 0x00, 0x09, 0x91, 0xa8, 0x8c, 0x2f, 0x8c, 0x2a, 0x7f, 0x66, 0x21, 0x6f, 0xe0, 0x99, + 0x3a, 0x44, 0x6d, 0xdf, 0x85, 0x4d, 0xd7, 0xe5, 0x2d, 0x26, 0x2b, 0x25, 0x9d, 0xa3, 0xcb, 0xaa, + 0x58, 0xeb, 0xba, 0xa0, 0xdb, 0xd1, 0x89, 0xd3, 0x62, 0x95, 0x89, 0x05, 0xb4, 0x58, 0x76, 0x0a, + 0x36, 0x32, 0x8b, 0x4b, 0x35, 0x7a, 0x00, 0x4d, 0x51, 0x29, 0xeb, 0x1c, 0xc9, 0x8a, 0x7c, 0x8a, + 0x67, 0x1e, 0x1d, 0x33, 0x88, 0x1c, 0x78, 0x02, 0x11, 0x30, 0x17, 0x2a, 0x93, 0x0b, 0x68, 0x71, + 0xaa, 0xba, 0x66, 0xc5, 0x52, 0x5a, 0xf9, 0x2a, 0x5a, 0xe1, 0x91, 0xa7, 0x0c, 0xc2, 0x52, 0x55, + 0xb4, 0xda, 0x2b, 0x56, 0x31, 0xd6, 0xe9, 0xca, 0x65, 0x3e, 0x1f, 0xc1, 0xaf, 0x3c, 0x4e, 0xaa, + 0xb6, 0x15, 0x01, 0x95, 0xe0, 0xc0, 0xe7, 0x2d, 0x10, 0x92, 0xcc, 0xe1, 0x49, 0xa5, 0xa1, 0x08, + 0xa9, 0x9b, 0x0a, 0x9f, 0x19, 0xc8, 0x3e, 0x2e, 0xa7, 0x61, 0x5a, 0xf9, 0xa9, 0xea, 0x86, 0x95, + 0xf5, 0x87, 0x95, 0xf6, 0x87, 0xbe, 0xc8, 0xa0, 0x3a, 0x5d, 0x9c, 0xf6, 0x87, 0x95, 0x26, 0x71, + 0x3a, 0xe9, 0xc8, 0x3e, 0x9e, 0x8e, 0x49, 0x92, 0xea, 0xeb, 0xba, 0x4d, 0x55, 0x57, 0xfb, 0x7b, + 0xde, 0x42, 0xa8, 0x53, 0xcc, 0x44, 0x36, 0xba, 0x1a, 0x4b, 0x97, 0x7c, 0xaa, 0xfa, 0x6a, 0x06, + 0x56, 0x70, 0x3b, 0xc5, 0xbb, 0xcd, 0x67, 0x08, 0x93, 0x14, 0x73, 0x07, 0x64, 0xaa, 0x94, 0x89, + 0x6f, 0xa4, 0xd6, 0x5c, 0x97, 0x16, 0x6c, 0x45, 0x35, 0x47, 0xba, 0xd5, 0xdc, 0xc5, 0x78, 0x07, + 0x64, 0xf1, 0x79, 0x97, 0xfb, 0x7b, 0xde, 0x2c, 0xce, 0xc9, 0xe5, 0x30, 0xbf, 0x46, 0xf8, 0xa5, + 0x14, 0xa0, 0xe6, 0x0b, 0xd9, 0x5f, 0x55, 0xeb, 0x78, 0x4a, 0xdd, 0x9c, 0x82, 0xc4, 0x85, 0x5d, + 0xe9, 0x0f, 0x24, 0x17, 0xe8, 0xe4, 0xb3, 0x98, 0xad, 0xac, 0xc3, 0x3e, 0x0e, 0x1b, 0xb9, 0x0e, + 0xbb, 0xba, 0x6e, 0x06, 0x2e, 0x7f, 0x08, 0x01, 0xf7, 0xbf, 0x80, 0x86, 0x56, 0xad, 0xec, 0x74, + 0xd6, 0xe6, 0xef, 0xb9, 0x62, 0xd5, 0xb8, 0x77, 0x7d, 0x9b, 0x56, 0xf0, 0xc4, 0x2e, 0x6f, 0xe8, + 0xe0, 0x78, 0xa2, 0xa4, 0x4b, 0x15, 0xb7, 0xc5, 0x99, 0xa4, 0x4a, 0xa1, 0x64, 0x9a, 0x64, 0x06, + 0xb2, 0x89, 0x71, 0x93, 0x7b, 0xa9, 0xb6, 0xe3, 0x5a, 0xdb, 0xdb, 0x39, 0x6d, 0x2d, 0x35, 0x0f, + 0x95, 0x92, 0xbb, 0xbc, 0x51, 0xeb, 0xdc, 0xe8, 0xe4, 0x82, 0xcc, 0x5f, 0x50, 0xa6, 0xe5, 0x36, + 0x34, 0xe1, 0x3a, 0xb5, 0xdc, 0xc7, 0xd3, 0x71, 0xca, 0xff, 0xf4, 0xda, 0x15, 0x42, 0x9d, 0x62, + 0x26, 0x73, 0x0f, 0xcf, 0x76, 0x53, 0x8b, 0x90, 0x33, 0x01, 0x7d, 0x61, 0xcf, 0xe2, 0x52, 0x5d, + 0x52, 0xd9, 0x12, 0x09, 0x73, 0xb2, 0x32, 0x19, 0x2e, 0xd7, 0xb8, 0xa7, 0xa7, 0xb5, 0xaa, 0x89, + 0xcb, 0x99, 0x04, 0x26, 0x93, 0x14, 0xe9, 0x92, 0x7c, 0x80, 0x27, 0xa5, 0x1f, 0x40, 0x5d, 0xd2, + 0x20, 0x4c, 0x1a, 0x7a, 0xa9, 0xbf, 0x47, 0xda, 0xf3, 0x03, 0x70, 0xb2, 0xe0, 0xea, 0xdf, 0x33, + 0xf8, 0x7f, 0x29, 0x58, 0x32, 0xdf, 0xc9, 0x53, 0x84, 0x4b, 0xf1, 0x88, 0x21, 0xb7, 0xb2, 0x21, + 0xd2, 0x73, 0xa0, 0x1a, 0x57, 0x1b, 0x90, 0xe6, 0xdc, 0x57, 0x7f, 0xfc, 0xf5, 0xdd, 0xc8, 0xac, + 0xf9, 0x7f, 0x7d, 0x76, 0xb6, 0x57, 0x3a, 0x87, 0xad, 0x58, 0x47, 0x4b, 0xe4, 0x7b, 0x84, 0x47, + 0x77, 0x40, 
0x92, 0xb9, 0x8b, 0x14, 0xd9, 0xa4, 0xba, 0x2a, 0xc2, 0x9a, 0x46, 0xb0, 0xc8, 0x9d, + 0x0b, 0x08, 0xf6, 0x69, 0xa7, 0x91, 0xce, 0xec, 0xd3, 0x7c, 0xf9, 0xce, 0xc8, 0xb7, 0x08, 0x8f, + 0xa9, 0x79, 0x40, 0x6e, 0x5e, 0x64, 0xcb, 0x8d, 0x26, 0x63, 0xf3, 0x4a, 0x70, 0x2a, 0x93, 0xf9, + 0xba, 0x06, 0xbc, 0x45, 0x6e, 0xbe, 0x10, 0x90, 0x7c, 0x89, 0x70, 0x29, 0x6e, 0xc4, 0x5e, 0x55, + 0x2b, 0xbc, 0x58, 0xc6, 0xc2, 0xe5, 0x37, 0xc4, 0x3d, 0x9c, 0xaa, 0xb2, 0x34, 0x98, 0x2a, 0x3f, + 0x22, 0x3c, 0xee, 0x80, 0xea, 0xdd, 0x1e, 0x08, 0x85, 0x39, 0x79, 0xd5, 0xaa, 0x6d, 0x68, 0xbe, + 0xb7, 0x8c, 0xea, 0x20, 0x7c, 0x76, 0xa4, 0xd8, 0x54, 0x67, 0xfd, 0x84, 0x70, 0xd9, 0x01, 0xa1, + 0x0f, 0xc2, 0xa1, 0xb3, 0xbe, 0xab, 0x59, 0xd7, 0x8d, 0xbb, 0x03, 0xb2, 0xc6, 0x78, 0x0a, 0xf7, + 0x19, 0xc2, 0x25, 0x85, 0x1b, 0xc0, 0xd0, 0x61, 0xdf, 0xd1, 0xb0, 0xf7, 0x8c, 0xd5, 0x81, 0x61, + 0x03, 0x50, 0xa8, 0xcf, 0x11, 0x9e, 0xa8, 0xb7, 0x44, 0x08, 0xac, 0x31, 0x74, 0xd6, 0x07, 0x9a, + 0xf5, 0x6d, 0x63, 0x6d, 0x20, 0x56, 0x11, 0xd3, 0x29, 0xd8, 0x9f, 0x11, 0x9e, 0xdc, 0x83, 0x28, + 0xf0, 0xd9, 0x25, 0xc3, 0xee, 0x5a, 0x71, 0x37, 0x35, 0xee, 0x7d, 0xe3, 0xcd, 0x81, 0x70, 0x65, + 0xca, 0xa7, 0x80, 0xbf, 0xd1, 0x63, 0x87, 0xc9, 0xa1, 0x0f, 0xe6, 0xdb, 0x9a, 0xf5, 0x35, 0x73, + 0xf6, 0x22, 0x6b, 0xd3, 0x67, 0xba, 0x29, 0x9f, 0x22, 0xfd, 0xcd, 0x50, 0xe3, 0x9e, 0xe8, 0x35, + 0xa1, 0xb3, 0xcf, 0x13, 0x83, 0x64, 0xde, 0xf4, 0x70, 0x33, 0x77, 0xf4, 0x06, 0x9b, 0xe4, 0x41, + 0xf7, 0x06, 0x2f, 0xd2, 0x22, 0xe4, 0x0d, 0x61, 0x9f, 0x26, 0x5f, 0x26, 0x67, 0x76, 0x93, 0x7b, + 0x62, 0x19, 0x91, 0x1f, 0x10, 0x1e, 0x7f, 0x4c, 0xa5, 0x7b, 0x38, 0xdc, 0x83, 0xe2, 0xbe, 0x26, + 0xbe, 0x4b, 0x3a, 0x6f, 0x86, 0x90, 0x11, 0xd0, 0xa0, 0xaf, 0x2a, 0x2e, 0xa3, 0xf7, 0xd6, 0x7f, + 0x3d, 0x9f, 0x47, 0xbf, 0x9d, 0xcf, 0xa3, 0x3f, 0xcf, 0xe7, 0xd1, 0x27, 0x77, 0x2e, 0xfd, 0xef, + 0xec, 0xf1, 0xa3, 0x7c, 0x50, 0xd2, 0xff, 0x90, 0xab, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x9f, + 0x69, 0xa8, 0xf0, 0x46, 0x0f, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -594,8 +759,11 @@ type WorkflowServiceClient interface { Resume(ctx context.Context, in *WorkflowUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) Suspend(ctx context.Context, in *WorkflowUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) Terminate(ctx context.Context, in *WorkflowUpdateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) + Lint(ctx context.Context, in *WorkflowCreateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) // PodLogs returns stream of log entries for the specified pod. Pod PodLogs(ctx context.Context, in *WorkflowLogRequest, opts ...grpc.CallOption) (WorkflowService_PodLogsClient, error) + // Watch returns stream of application change events. + Watch(ctx context.Context, in *WorkflowGetRequest, opts ...grpc.CallOption) (WorkflowService_WatchClient, error) } type workflowServiceClient struct { @@ -687,6 +855,15 @@ func (c *workflowServiceClient) Terminate(ctx context.Context, in *WorkflowUpdat return out, nil } +func (c *workflowServiceClient) Lint(ctx context.Context, in *WorkflowCreateRequest, opts ...grpc.CallOption) (*v1alpha1.Workflow, error) { + out := new(v1alpha1.Workflow) + err := c.cc.Invoke(ctx, "/workflow.WorkflowService/Lint", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *workflowServiceClient) PodLogs(ctx context.Context, in *WorkflowLogRequest, opts ...grpc.CallOption) (WorkflowService_PodLogsClient, error) { stream, err := c.cc.NewStream(ctx, &_WorkflowService_serviceDesc.Streams[0], "/workflow.WorkflowService/PodLogs", opts...) if err != nil { @@ -719,6 +896,38 @@ func (x *workflowServicePodLogsClient) Recv() (*LogEntry, error) { return m, nil } +func (c *workflowServiceClient) Watch(ctx context.Context, in *WorkflowGetRequest, opts ...grpc.CallOption) (WorkflowService_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_WorkflowService_serviceDesc.Streams[1], "/workflow.WorkflowService/Watch", opts...) + if err != nil { + return nil, err + } + x := &workflowServiceWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type WorkflowService_WatchClient interface { + Recv() (*v1alpha1.Workflow, error) + grpc.ClientStream +} + +type workflowServiceWatchClient struct { + grpc.ClientStream +} + +func (x *workflowServiceWatchClient) Recv() (*v1alpha1.Workflow, error) { + m := new(v1alpha1.Workflow) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // WorkflowServiceServer is the server API for WorkflowService service. type WorkflowServiceServer interface { Create(context.Context, *WorkflowCreateRequest) (*v1alpha1.Workflow, error) @@ -730,8 +939,11 @@ type WorkflowServiceServer interface { Resume(context.Context, *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) Suspend(context.Context, *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) Terminate(context.Context, *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) + Lint(context.Context, *WorkflowCreateRequest) (*v1alpha1.Workflow, error) // PodLogs returns stream of log entries for the specified pod. Pod PodLogs(*WorkflowLogRequest, WorkflowService_PodLogsServer) error + // Watch returns stream of application change events. + Watch(*WorkflowGetRequest, WorkflowService_WatchServer) error } // UnimplementedWorkflowServiceServer can be embedded to have forward compatible implementations. 
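
Watch, like PodLogs, is a server-streaming RPC: the generated client wraps a grpc.ClientStream, and each Recv() blocks until the server sends the next v1alpha1.Workflow. A minimal consumer might look like the sketch below. It assumes an argo server reachable in plaintext on localhost:8080 (the --port default wired up in cmd/server/main.go above) and uses NewWorkflowServiceClient, the standard constructor emitted alongside the client code in this file:

package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"google.golang.org/grpc"

	"github.com/argoproj/argo/cmd/server/workflow"
)

func main() {
	// Plaintext dial, matching cmd/client/client.go.
	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := workflow.NewWorkflowServiceClient(conn)
	stream, err := client.Watch(context.Background(), &workflow.WorkflowGetRequest{Namespace: "default"})
	if err != nil {
		log.Fatal(err)
	}
	for {
		wf, err := stream.Recv() // blocks until the next Workflow arrives
		if err == io.EOF {
			return // server closed the stream
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(wf.Name)
	}
}
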
@@ -765,9 +977,15 @@ func (*UnimplementedWorkflowServiceServer) Suspend(ctx context.Context, req *Wor func (*UnimplementedWorkflowServiceServer) Terminate(ctx context.Context, req *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { return nil, status.Errorf(codes.Unimplemented, "method Terminate not implemented") } +func (*UnimplementedWorkflowServiceServer) Lint(ctx context.Context, req *WorkflowCreateRequest) (*v1alpha1.Workflow, error) { + return nil, status.Errorf(codes.Unimplemented, "method Lint not implemented") +} func (*UnimplementedWorkflowServiceServer) PodLogs(req *WorkflowLogRequest, srv WorkflowService_PodLogsServer) error { return status.Errorf(codes.Unimplemented, "method PodLogs not implemented") } +func (*UnimplementedWorkflowServiceServer) Watch(req *WorkflowGetRequest, srv WorkflowService_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} func RegisterWorkflowServiceServer(s *grpc.Server, srv WorkflowServiceServer) { s.RegisterService(&_WorkflowService_serviceDesc, srv) @@ -935,6 +1153,24 @@ func _WorkflowService_Terminate_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _WorkflowService_Lint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowServiceServer).Lint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflow.WorkflowService/Lint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowServiceServer).Lint(ctx, req.(*WorkflowCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _WorkflowService_PodLogs_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(WorkflowLogRequest) if err := stream.RecvMsg(m); err != nil { @@ -956,6 +1192,27 @@ func (x *workflowServicePodLogsServer) Send(m *LogEntry) error { return x.ServerStream.SendMsg(m) } +func _WorkflowService_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(WorkflowGetRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(WorkflowServiceServer).Watch(m, &workflowServiceWatchServer{stream}) +} + +type WorkflowService_WatchServer interface { + Send(*v1alpha1.Workflow) error + grpc.ServerStream +} + +type workflowServiceWatchServer struct { + grpc.ServerStream +} + +func (x *workflowServiceWatchServer) Send(m *v1alpha1.Workflow) error { + return x.ServerStream.SendMsg(m) +} + var _WorkflowService_serviceDesc = grpc.ServiceDesc{ ServiceName: "workflow.WorkflowService", HandlerType: (*WorkflowServiceServer)(nil), @@ -996,6 +1253,10 @@ var _WorkflowService_serviceDesc = grpc.ServiceDesc{ MethodName: "Terminate", Handler: _WorkflowService_Terminate_Handler, }, + { + MethodName: "Lint", + Handler: _WorkflowService_Lint_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -1003,10 +1264,115 @@ var _WorkflowService_serviceDesc = grpc.ServiceDesc{ Handler: _WorkflowService_PodLogs_Handler, ServerStreams: true, }, + { + StreamName: "Watch", + Handler: _WorkflowService_Watch_Handler, + ServerStreams: true, + }, }, Metadata: "cmd/server/workflow/workflow.proto", } +func (m *SubmitOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *SubmitOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubmitOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.OwnerReference != nil { + { + size, err := m.OwnerReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflow(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if len(m.Labels) > 0 { + i -= len(m.Labels) + copy(dAtA[i:], m.Labels) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Labels))) + i-- + dAtA[i] = 0x42 + } + if m.ServerDryRun { + i-- + if m.ServerDryRun { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.ServiceAccount) > 0 { + i -= len(m.ServiceAccount) + copy(dAtA[i:], m.ServiceAccount) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.ServiceAccount))) + i-- + dAtA[i] = 0x32 + } + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Parameters[iNdEx]) + copy(dAtA[i:], m.Parameters[iNdEx]) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Parameters[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Entrypoint) > 0 { + i -= len(m.Entrypoint) + copy(dAtA[i:], m.Entrypoint) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Entrypoint))) + i-- + dAtA[i] = 0x22 + } + if len(m.InstanceID) > 0 { + i -= len(m.InstanceID) + copy(dAtA[i:], m.InstanceID) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.InstanceID))) + i-- + dAtA[i] = 0x1a + } + if len(m.GenerateName) > 0 { + i -= len(m.GenerateName) + copy(dAtA[i:], m.GenerateName) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.GenerateName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *WorkflowCreateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1031,6 +1397,18 @@ func (m *WorkflowCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.SubmitOptions != nil { + { + size, err := m.SubmitOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflow(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } if m.CreateOptions != nil { { size, err := m.CreateOptions.MarshalToSizedBuffer(dAtA[:i]) @@ -1043,9 +1421,9 @@ func (m *WorkflowCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x1a } - if m.Workflows != nil { + if m.Workflow != nil { { - size, err := m.Workflows.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Workflow.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1239,14 +1617,26 @@ func (m *WorkflowLogRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Container) > 0 { - i -= len(m.Container) - copy(dAtA[i:], m.Container) - i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Container))) - i-- - dAtA[i] = 0x22 - } - if len(m.PodName) > 0 { + if m.LogOptions != nil { + { + size, err := m.LogOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflow(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Container) > 0 { + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x22 + } + if len(m.PodName) > 0 { i -= len(m.PodName) copy(dAtA[i:], m.PodName) i = encodeVarintWorkflow(dAtA, i, uint64(len(m.PodName))) @@ -1347,6 +1737,20 @@ func (m *WorkflowDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Status) > 0 { + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + } + if len(m.WorkflowName) > 0 { + i -= len(m.WorkflowName) + copy(dAtA[i:], m.WorkflowName) + i = encodeVarintWorkflow(dAtA, i, uint64(len(m.WorkflowName))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } @@ -1407,6 +1811,55 @@ func encodeVarintWorkflow(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } +func (m *SubmitOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + l = len(m.GenerateName) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + l = len(m.InstanceID) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + l = len(m.Entrypoint) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + if len(m.Parameters) > 0 { + for _, s := range m.Parameters { + l = len(s) + n += 1 + l + sovWorkflow(uint64(l)) + } + } + l = len(m.ServiceAccount) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.ServerDryRun { + n += 2 + } + l = len(m.Labels) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.OwnerReference != nil { + l = m.OwnerReference.Size() + n += 1 + l + sovWorkflow(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *WorkflowCreateRequest) Size() (n int) { if m == nil { return 0 @@ -1417,14 +1870,18 @@ func (m *WorkflowCreateRequest) Size() (n int) { if l > 0 { n += 1 + l + sovWorkflow(uint64(l)) } - if m.Workflows != nil { - l = m.Workflows.Size() + if m.Workflow != nil { + l = m.Workflow.Size() n += 1 + l + sovWorkflow(uint64(l)) } if m.CreateOptions != nil { l = m.CreateOptions.Size() n += 1 + l + sovWorkflow(uint64(l)) } + if m.SubmitOptions != nil { + l = m.SubmitOptions.Size() + n += 1 + l + sovWorkflow(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1520,6 +1977,10 @@ func (m *WorkflowLogRequest) Size() (n int) { if l > 0 { n += 1 + l + sovWorkflow(uint64(l)) } + if m.LogOptions != nil { + l = m.LogOptions.Size() + n += 1 + l + sovWorkflow(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1556,6 +2017,14 @@ func (m *WorkflowDeleteResponse) Size() (n int) { } var l int _ = l + l = len(m.WorkflowName) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } + l = len(m.Status) + if l > 0 { + n += 1 + l + sovWorkflow(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1588,6 +2057,340 @@ func sovWorkflow(x uint64) (n int) { func sozWorkflow(x uint64) (n int) { return sovWorkflow(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (m *SubmitOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubmitOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubmitOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenerateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InstanceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InstanceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entrypoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entrypoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
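+ // stringLen now holds the byte length of the string payload, decoded
+ // from the base-128 varint above: each byte contributes its low seven
+ // bits, and the first byte with the high bit clear ends the sequence.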
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerDryRun", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ServerDryRun = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OwnerReference == nil { + m.OwnerReference = &v1.OwnerReference{} + } + if err := m.OwnerReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorkflow(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflow + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *WorkflowCreateRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1651,7 +2454,7 @@ func (m *WorkflowCreateRequest) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1678,10 +2481,10 @@ func (m *WorkflowCreateRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Workflows == nil { - m.Workflows = &v1alpha1.Workflow{} + if m.Workflow == nil { + m.Workflow = &v1alpha1.Workflow{} } - if err := m.Workflows.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Workflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1721,6 +2524,42 @@ func (m *WorkflowCreateRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubmitOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SubmitOptions == nil { + m.SubmitOptions = &SubmitOptions{} + } + if err := m.SubmitOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipWorkflow(dAtA[iNdEx:]) @@ -2317,6 +3156,42 @@ func (m *WorkflowLogRequest) Unmarshal(dAtA []byte) error { } m.Container = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogOptions == nil { + m.LogOptions = &v11.PodLogOptions{} + } + if err := m.LogOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipWorkflow(dAtA[iNdEx:]) @@ -2525,6 +3400,70 @@ func (m *WorkflowDeleteResponse) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: WorkflowDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkflowName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflow + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflow + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipWorkflow(dAtA[iNdEx:]) diff --git a/cmd/server/workflow/workflow.pb.gw.go b/cmd/server/workflow/workflow.pb.gw.go index a6a88a09cc02..715811bcd51b 100644 --- a/cmd/server/workflow/workflow.pb.gw.go +++ b/cmd/server/workflow/workflow.pb.gw.go @@ -773,6 +773,40 @@ func local_request_WorkflowService_Terminate_0(ctx context.Context, marshaler ru } +func request_WorkflowService_Lint_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowCreateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Lint(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowService_Lint_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowCreateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Lint(ctx, &protoReq) + return msg, metadata, err + +} + var ( filter_WorkflowService_PodLogs_0 = &utilities.DoubleArray{Encoding: map[string]int{"Namespace": 0, "WorkflowName": 1, "PodName": 2}, Base: []int{1, 1, 2, 3, 0, 0, 0}, Check: []int{0, 1, 1, 1, 2, 3, 4}} ) @@ -841,6 +875,63 @@ func request_WorkflowService_PodLogs_0(ctx context.Context, marshaler runtime.Ma } +var ( + filter_WorkflowService_Watch_0 = &utilities.DoubleArray{Encoding: map[string]int{"Namespace": 0, "WorkflowName": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_WorkflowService_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (WorkflowService_WatchClient, runtime.ServerMetadata, error) { + var protoReq WorkflowGetRequest + var metadata 
runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["Namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + } + + val, ok = pathParams["WorkflowName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + } + + protoReq.WorkflowName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowService_Watch_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + stream, err := client.Watch(ctx, &protoReq) + if err != nil { + return nil, metadata, err + } + header, err := stream.Header() + if err != nil { + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil + +} + // RegisterWorkflowServiceHandlerServer registers the http handlers for service WorkflowService to "mux". // UnaryRPC :call WorkflowServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. @@ -1026,6 +1117,26 @@ func RegisterWorkflowServiceHandlerServer(ctx context.Context, mux *runtime.Serv }) + mux.Handle("POST", pattern_WorkflowService_Lint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowService_Lint_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Lint_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + mux.Handle("GET", pattern_WorkflowService_PodLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) @@ -1033,6 +1144,13 @@ func RegisterWorkflowServiceHandlerServer(ctx context.Context, mux *runtime.Serv return }) + mux.Handle("GET", pattern_WorkflowService_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") + _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + }) + return nil } @@ -1254,6 +1372,26 @@ func RegisterWorkflowServiceHandlerClient(ctx context.Context, mux *runtime.Serv }) + mux.Handle("POST", pattern_WorkflowService_Lint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_Lint_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Lint_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + mux.Handle("GET", pattern_WorkflowService_PodLogs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -1274,6 +1412,26 @@ func RegisterWorkflowServiceHandlerClient(ctx context.Context, mux *runtime.Serv }) + mux.Handle("GET", pattern_WorkflowService_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_WorkflowService_Watch_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowService_Watch_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
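+
+	// Unlike the unary handlers above, Watch forwards the gRPC stream
+	// message-by-message: each resp.Recv() yields one Workflow event, and
+	// forward_WorkflowService_Watch_0 resolves to runtime.ForwardResponseStream
+	// (see the forward_* declarations below) rather than ForwardResponseMessage.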
+ + }) + return nil } @@ -1286,17 +1444,21 @@ var ( pattern_WorkflowService_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Retry_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Retry_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName", "retry"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Resubmit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Resubmit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName", "resubmit"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Resume_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Resume_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName", "resume"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Suspend_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Suspend_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName", "suspend"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Terminate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Terminate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName", "terminate"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_Lint_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "workflows", "lint"}, "", runtime.AssumeColonVerbOpt(true))) pattern_WorkflowService_PodLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6, 2, 7}, []string{"api", "v1", "workflow", "Namespace", "WorkflowName", "pods", "PodName", "logs"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_WorkflowService_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "stream", "workflows", "Namespace", "WorkflowName"}, "", 
runtime.AssumeColonVerbOpt(true))) ) var ( @@ -1318,5 +1480,9 @@ var ( forward_WorkflowService_Terminate_0 = runtime.ForwardResponseMessage + forward_WorkflowService_Lint_0 = runtime.ForwardResponseMessage + forward_WorkflowService_PodLogs_0 = runtime.ForwardResponseStream + + forward_WorkflowService_Watch_0 = runtime.ForwardResponseStream ) diff --git a/cmd/server/workflow/workflow.proto b/cmd/server/workflow/workflow.proto index b2cf2e2feea5..b629868499b4 100644 --- a/cmd/server/workflow/workflow.proto +++ b/cmd/server/workflow/workflow.proto @@ -6,20 +6,35 @@ import "gogoproto/gogo.proto"; import "google/api/annotations.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1/generated.proto"; +import "k8s.io/api/core/v1/generated.proto"; // Workflow Service // // Workflow Service API performs CRUD actions against application resources package workflow; +message SubmitOptions{ + string Name = 1; + string GenerateName = 2; + string InstanceID = 3; + string Entrypoint = 4; + repeated string Parameters = 5; + string ServiceAccount = 6; + bool ServerDryRun = 7; + string Labels = 8; + k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference OwnerReference = 9; + +} + message WorkflowCreateRequest{ string Namespace = 1; - github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow Workflows = 2; + github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow Workflow = 2; k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions CreateOptions = 3; + SubmitOptions SubmitOptions = 4; } message WorkflowGetRequest{ - string WorkflowName =1; + string WorkflowName = 1; string Namespace = 2; k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions GetOptions =3; } @@ -30,25 +45,29 @@ message WorkflowListRequest{ } message WorkflowUpdateRequest{ - string WorkflowName =1; + string WorkflowName = 1; string Namespace = 2; bool Memoized = 3; } message WorkflowLogRequest{ - string WorkflowName =1; + string WorkflowName = 1; string Namespace = 2; string PodName = 3; string Container = 4; + k8s.io.api.core.v1.PodLogOptions logOptions = 5; } message WorkflowDeleteRequest{ - string WorkflowName =1; + string WorkflowName = 1; string Namespace = 2; k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions DeleteOptions =3; } -message WorkflowDeleteResponse{} +message WorkflowDeleteResponse{ + string WorkflowName = 1; + string Status = 2; +} message LogEntry { string content = 1; @@ -78,35 +97,42 @@ service WorkflowService { rpc Retry(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ option (google.api.http) = { - put: "/api/v1/workflows/{Namespace}/{WorkflowName}" + put: "/api/v1/workflows/{Namespace}/{WorkflowName}/retry" body: "*" }; } rpc Resubmit(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ option (google.api.http) = { - put: "/api/v1/workflows/{Namespace}/{WorkflowName}" + put: "/api/v1/workflows/{Namespace}/{WorkflowName}/resubmit" body: "*" }; } rpc Resume(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ option (google.api.http) = { - put: "/api/v1/workflows/{Namespace}/{WorkflowName}" + put: "/api/v1/workflows/{Namespace}/{WorkflowName}/resume" body: "*" }; } rpc Suspend(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ option (google.api.http) = { - put: "/api/v1/workflows/{Namespace}/{WorkflowName}" + put: "/api/v1/workflows/{Namespace}/{WorkflowName}/suspend" body: "*" }; 
} rpc Terminate(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ option (google.api.http) = { - put: "/api/v1/workflows/{Namespace}/{WorkflowName}" + put: "/api/v1/workflows/{Namespace}/{WorkflowName}/terminate" + body: "*" + }; + } + + rpc Lint(WorkflowCreateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + option (google.api.http) = { + post: "/api/v1/workflows/lint" body: "*" }; } @@ -116,4 +142,8 @@ service WorkflowService { option (google.api.http).get = "/api/v1/workflow/{Namespace}/{WorkflowName}/pods/{PodName}/logs"; } + // Watch returns stream of application change events. + rpc Watch(WorkflowGetRequest) returns (stream github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { + option (google.api.http).get = "/api/v1/stream/workflows/{Namespace}/{WorkflowName}"; + } } \ No newline at end of file diff --git a/cmd/server/workflow/workflow.swagger.json b/cmd/server/workflow/workflow.swagger.json index 466df7bf046c..aa81113916fa 100644 --- a/cmd/server/workflow/workflow.swagger.json +++ b/cmd/server/workflow/workflow.swagger.json @@ -16,6 +16,44 @@ "application/json" ], "paths": { + "/api/v1/stream/workflows/{Namespace}/{WorkflowName}": { + "get": { + "summary": "Watch returns stream of application change events.", + "operationId": "Watch", + "responses": { + "200": { + "description": "A successful response.(streaming responses)", + "schema": { + "$ref": "#/x-stream-definitions/v1alpha1Workflow" + } + } + }, + "parameters": [ + { + "name": "Namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "WorkflowName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "GetOptions.resourceVersion", + "description": "When specified:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.", + "in": "query", + "required": false, + "type": "string" + } + ], + "tags": [ + "WorkflowService" + ] + } + }, "/api/v1/workflow/{Namespace}/{WorkflowName}/pods/{PodName}/logs": { "get": { "summary": "PodLogs returns stream of log entries for the specified pod. Pod", @@ -52,6 +90,77 @@ "in": "query", "required": false, "type": "string" + }, + { + "name": "logOptions.container", + "description": "The container for which to stream logs. Defaults to only container if there is one container in the pod.\n+optional.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "logOptions.follow", + "description": "Follow the log stream of the pod. Defaults to false.\n+optional.", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "logOptions.previous", + "description": "Return previous terminated container logs. Defaults to false.\n+optional.", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "logOptions.sinceSeconds", + "description": "A relative time in seconds before the current time from which to show logs. 
If this value\nprecedes the time a pod was started, only logs since the pod start will be returned.\nIf this value is in the future, no logs will be returned.\nOnly one of sinceSeconds or sinceTime may be specified.\n+optional.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "logOptions.sinceTime.seconds", + "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "logOptions.sinceTime.nanos", + "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context.", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "logOptions.timestamps", + "description": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line\nof log output. Defaults to false.\n+optional.", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "logOptions.tailLines", + "description": "If set, the number of lines from the end of the logs to show. If not specified,\nlogs are shown from the creation of the container or sinceSeconds or sinceTime\n+optional.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "logOptions.limitBytes", + "description": "If set, the number of bytes to read from the server before terminating the\nlog output. This may not display a complete final line of logging, and may return\nslightly more or slightly less than the specified limit.\n+optional.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" } ], "tags": [ @@ -85,6 +194,32 @@ ] } }, + "/api/v1/workflows/lint": { + "post": { + "operationId": "Lint", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Workflow" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/workflowWorkflowCreateRequest" + } + } + ], + "tags": [ + "WorkflowService" + ] + } + }, "/api/v1/workflows/{Namespace}": { "get": { "operationId": "List", @@ -125,6 +260,14 @@ "type": "boolean", "format": "boolean" }, + { + "name": "ListOptions.allowWatchBookmarks", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. 
Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\nIf the feature gate WatchBookmarks is not enabled in apiserver,\nthis field is ignored.\n\nThis field is beta.\n\n+optional", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, { "name": "ListOptions.resourceVersion", "description": "When specified with a watch call, shows changes that occur after that particular version of a resource.\nDefaults to changes from the beginning of history.\nWhen specified for list:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.\n+optional.", @@ -272,7 +415,161 @@ "tags": [ "WorkflowService" ] - }, + } + }, + "/api/v1/workflows/{Namespace}/{WorkflowName}/resubmit": { + "put": { + "operationId": "Resubmit", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Workflow" + } + } + }, + "parameters": [ + { + "name": "Namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "WorkflowName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/workflowWorkflowUpdateRequest" + } + } + ], + "tags": [ + "WorkflowService" + ] + } + }, + "/api/v1/workflows/{Namespace}/{WorkflowName}/resume": { + "put": { + "operationId": "Resume", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Workflow" + } + } + }, + "parameters": [ + { + "name": "Namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "WorkflowName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/workflowWorkflowUpdateRequest" + } + } + ], + "tags": [ + "WorkflowService" + ] + } + }, + "/api/v1/workflows/{Namespace}/{WorkflowName}/retry": { + "put": { + "operationId": "Retry", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Workflow" + } + } + }, + "parameters": [ + { + "name": "Namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "WorkflowName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/workflowWorkflowUpdateRequest" + } + } + ], + "tags": [ + "WorkflowService" + ] + } + }, + "/api/v1/workflows/{Namespace}/{WorkflowName}/suspend": { + "put": { + "operationId": "Suspend", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Workflow" + } + } + }, + "parameters": [ + { + "name": "Namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "WorkflowName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/workflowWorkflowUpdateRequest" + } + } + ], + "tags": [ + "WorkflowService" + ] + } + }, + "/api/v1/workflows/{Namespace}/{WorkflowName}/terminate": { "put": { "operationId": 
"Terminate", "responses": { @@ -363,7 +660,7 @@ "type": "string" } }, - "description": "Quantity is a fixed-point representation of a number.\nIt provides convenient marshaling/unmarshaling in JSON and YAML,\nin addition to String() and Int64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9\n\u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e\n\u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e\n\u003csign\u003e ::= \"+\" | \"-\"\n\u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e\n\u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e\n\u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent\na number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal\nplaces. Numbers larger or more precise will be capped or rounded up.\n(E.g.: 0.1m will rounded up to 1m.)\nThis may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix\nit had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\".\nThis means that Exponent/suffix will be adjusted up or down (with a\ncorresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a\nfloating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed,\nbut will be re-emitted in their canonical form. (So always use canonical\nform, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without\nwriting some sort of special handling code in the hopes that that will\ncause implementors to also use a fixed point implementation.\n\n+protobuf=true\n+protobuf.embed=string\n+protobuf.options.marshal=false\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:deepcopy-gen=true\n+k8s:openapi-gen=true" + "description": "Quantity is a fixed-point representation of a number.\nIt provides convenient marshaling/unmarshaling in JSON and YAML,\nin addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9\n\u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e\n\u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e\n\u003csign\u003e ::= \"+\" | \"-\"\n\u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e\n\u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e\n\u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent\na number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal\nplaces. Numbers larger or more precise will be capped or rounded up.\n(E.g.: 0.1m will rounded up to 1m.)\nThis may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix\nit had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\".\nThis means that Exponent/suffix will be adjusted up or down (with a\ncorresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a\nfloating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed,\nbut will be re-emitted in their canonical form. 
(So always use canonical\nform, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without\nwriting some sort of special handling code in the hopes that that will\ncause implementors to also use a fixed point implementation.\n\n+protobuf=true\n+protobuf.embed=string\n+protobuf.options.marshal=false\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:deepcopy-gen=true\n+k8s:openapi-gen=true" }, "runtimeStreamError": { "type": "object", @@ -540,7 +837,7 @@ "items": { "type": "string" }, - "title": "Required: Monitors is a collection of Ceph monitors\nMore info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it" + "title": "Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" }, "path": { "type": "string", @@ -548,20 +845,20 @@ }, "user": { "type": "string", - "title": "Optional: User is the rados user name, default is admin\nMore info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it\n+optional" + "title": "Optional: User is the rados user name, default is admin\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" }, "secretFile": { "type": "string", - "title": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it\n+optional" + "title": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" }, "secretRef": { "$ref": "#/definitions/v1LocalObjectReference", - "title": "Optional: SecretRef is reference to the authentication secret for User, default is empty.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it\n+optional" + "title": "Optional: SecretRef is reference to the authentication secret for User, default is empty.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" }, "readOnly": { "type": "boolean", "format": "boolean", - "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it\n+optional" + "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" } }, "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod\nCephfs volumes do not support ownership management or SELinux relabeling." @@ -571,16 +868,16 @@ "properties": { "volumeID": { "type": "string", - "title": "volume id used to identify the volume in cinder\nMore info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md" + "title": "volume id used to identify the volume in cinder.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" }, "fsType": { "type": "string", - "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md\n+optional" + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" }, "readOnly": { "type": "boolean", "format": "boolean", - "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md\n+optional" + "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" }, "secretRef": { "$ref": "#/definitions/v1LocalObjectReference", @@ -618,7 +915,7 @@ "optional": { "type": "boolean", "format": "boolean", - "title": "Specify whether the ConfigMap or it's key must be defined\n+optional" + "title": "Specify whether the ConfigMap or its key must be defined\n+optional" } }, "description": "Selects a key from a ConfigMap." @@ -639,7 +936,7 @@ "optional": { "type": "boolean", "format": "boolean", - "title": "Specify whether the ConfigMap or it's keys must be defined\n+optional" + "title": "Specify whether the ConfigMap or its keys must be defined\n+optional" } }, "description": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a\nprojected volume as files using the keys in the Data field as the file names,\nunless the items element is populated with specific mappings of keys to paths.\nNote that this is identical to a configmap volume source without the default\nmode." @@ -665,7 +962,7 @@ "optional": { "type": "boolean", "format": "boolean", - "title": "Specify whether the ConfigMap or it's keys must be defined\n+optional" + "title": "Specify whether the ConfigMap or its keys must be defined\n+optional" } }, "description": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a\nvolume as files using the keys in the Data field as the file names, unless\nthe items element is populated with specific mappings of keys to paths.\nConfigMap volumes support ownership management and SELinux relabeling." 
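
The Quantity definition above describes the serialization grammar and canonical form only in prose; a short round trip makes the behavior concrete. This is an illustrative sketch (not part of the patch) using k8s.io/apimachinery/pkg/api/resource, the Go type that this swagger definition mirrors:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Canonical form never emits fractional digits, so the decimal-SI
	// value 1.5 is rescaled onto the milli suffix.
	q := resource.MustParse("1.5")
	fmt.Println(q.String()) // "1500m"

	// Binary-SI suffixes rescale the same way: 1.5Gi becomes 1536Mi.
	g := resource.MustParse("1.5Gi")
	fmt.Println(g.String()) // "1536Mi"

	// The suffix family is remembered from parsing and reused on
	// serialization, exactly as the description states.
	fmt.Println(q.Format, g.Format) // DecimalSI BinarySI
}
```

The canonical form is why the description warns against diffing non-canonical values: "1.5" and "1500m" parse to the same quantity, but only the latter survives a round trip byte-for-byte.
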
@@ -746,6 +1043,10 @@ "$ref": "#/definitions/v1Probe", "title": "Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" }, + "startupProbe": { + "$ref": "#/definitions/v1Probe", + "title": "StartupProbe indicates that the Pod has successfully initialized.\nIf specified, no other probes are executed until this completes successfully.\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\nThis cannot be updated.\nThis is an alpha feature enabled by the StartupProbe feature flag.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, "lifecycle": { "$ref": "#/definitions/v1Lifecycle", "title": "Actions that the management system should take in response to container lifecycle events.\nCannot be updated.\n+optional" @@ -1033,18 +1334,16 @@ }, "description": "Represents a Fibre Channel volume.\nFibre Channel volumes can only be mounted as read/write once.\nFibre Channel volumes support ownership management and SELinux relabeling." }, - "v1Fields": { + "v1FieldsV1": { "type": "object", "properties": { - "map": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/v1Fields" - }, - "description": "Map stores a set of fields in a data structure like a Trie.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set,\nor a string representing a sub-field or item. The string will follow one of these four formats:\n'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map\n'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item\n'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list\n'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values\nIf a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal" + "Raw": { + "type": "string", + "format": "byte", + "description": "Raw is the underlying serialization of this object." } }, - "title": "Fields stores a set of fields in a data structure like a Trie.\nTo understand how this is used, see: https://github.com/kubernetes-sigs/structured-merge-diff" + "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set,\nor a string representing a sub-field or item. 
The string will follow one of these four formats:\n'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map\n'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item\n'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list\n'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values\nIf a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff" }, "v1FlexVolumeSource": { "type": "object", @@ -1147,16 +1446,16 @@ "properties": { "endpoints": { "type": "string", - "title": "EndpointsName is the endpoint name that details Glusterfs topology.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod" + "title": "EndpointsName is the endpoint name that details Glusterfs topology.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" }, "path": { "type": "string", - "title": "Path is the Glusterfs volume path.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod" + "title": "Path is the Glusterfs volume path.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" }, "readOnly": { "type": "boolean", "format": "boolean", - "title": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod\n+optional" + "title": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod\n+optional" } }, "description": "Represents a Glusterfs mount that lasts the lifetime of a pod.\nGlusterfs volumes do not support ownership management or SELinux relabeling." @@ -1310,33 +1609,6 @@ }, "description": "Represents an ISCSI disk.\nISCSI volumes can only be mounted as read/write once.\nISCSI volumes support ownership management and SELinux relabeling." }, - "v1Initializer": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "name of the process that is responsible for initializing this object." - } - }, - "description": "Initializer is information about an initializer that has not yet completed." - }, - "v1Initializers": { - "type": "object", - "properties": { - "pending": { - "type": "array", - "items": { - "$ref": "#/definitions/v1Initializer" - }, - "title": "Pending is a list of initializers that must execute in order before this object is visible.\nWhen the last pending initializer is removed, and no failing result is set, the initializers\nstruct will be set to nil and the object is considered as initialized and visible to all\nclients.\n+patchMergeKey=name\n+patchStrategy=merge" - }, - "result": { - "$ref": "#/definitions/v1Status", - "description": "If result is set with the Failure field, the object will be persisted to storage and then deleted,\nensuring that other clients can observe the deletion." - } - }, - "description": "Initializers tracks the progress of initialization." 
- }, "v1KeyToPath": { "type": "object", "properties": { @@ -1406,7 +1678,7 @@ }, "preStop": { "$ref": "#/definitions/v1Handler", - "title": "PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The reason for termination is passed to the\nhandler. The Pod's termination grace period countdown begins before the\nPreStop hooked is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod. Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\n+optional" + "title": "PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The reason for termination is passed to the\nhandler. The Pod's termination grace period countdown begins before the\nPreStop hooked is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod. Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\n+optional" } }, "description": "Lifecycle describes actions that the management system should take in response to container lifecycle\nevents. For the PostStart and PreStop lifecycle handlers, management of the container blocks\nuntil the action is complete, unless the container process fails, in which case the handler is aborted." @@ -1416,15 +1688,20 @@ "properties": { "selfLink": { "type": "string", - "title": "selfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n+optional" + "description": "selfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n\nDEPRECATED\nKubernetes will stop propagating this field in 1.20 release and the field is planned\nto be removed in 1.21 release.\n+optional" }, "resourceVersion": { "type": "string", - "title": "String that identifies the server's internal version of this object that\ncan be used by clients to determine when objects have changed.\nValue must be treated as opaque by clients and passed unmodified back to the server.\nPopulated by the system.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency\n+optional" + "title": "String that identifies the server's internal version of this object that\ncan be used by clients to determine when objects have changed.\nValue must be treated as opaque by clients and passed unmodified back to the server.\nPopulated by the system.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional" }, "continue": { "type": "string", "description": "continue may be set if the user set a limit on the number of items returned, and indicates that\nthe server has more data available. 
The value is opaque and may be used to issue another request\nto the endpoint that served this list to retrieve the next set of available objects. Continuing a\nconsistent list may not be possible if the server configuration has changed or more than a few\nminutes have passed. The resourceVersion field returned when using this continue value will be\nidentical to the value in the first response, unless you have received this token from an error\nmessage." + }, + "remainingItemCount": { + "type": "string", + "format": "int64", + "title": "remainingItemCount is the number of subsequent items in the list which are not included in this\nlist response. If the list request contained label or field selectors, then the number of\nremaining items is unknown and the field will be left unset and omitted during serialization.\nIf the list is complete (either because it is not chunking or because this is the last chunk),\nthen there are no more remaining items and this field will be left unset and omitted during\nserialization.\nServers older than v1.15 do not set this field.\nThe intended use of the remainingItemCount is *estimating* the size of a collection. Clients\nshould not rely on the remainingItemCount to be set or to be exact.\n+optional" } }, "description": "ListMeta describes metadata that synthetic resources must have, including lists and\nvarious status objects. A resource may have only one of {ObjectMeta, ListMeta}." @@ -1445,6 +1722,11 @@ "format": "boolean", "title": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional" }, + "allowWatchBookmarks": { + "type": "boolean", + "format": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\nIf the feature gate WatchBookmarks is not enabled in apiserver,\nthis field is ignored.\n\nThis field is beta.\n\n+optional" + }, "resourceVersion": { "type": "string", "title": "When specified with a watch call, shows changes that occur after that particular version of a resource.\nDefaults to changes from the beginning of history.\nWhen specified for list:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.\n+optional" @@ -1495,9 +1777,13 @@ "$ref": "#/definitions/v1Time", "title": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'\n+optional" }, - "fields": { - "$ref": "#/definitions/v1Fields", - "title": "Fields identifies a set of fields.\n+optional" + "fieldsType": { + "type": "string", + "title": "FieldsType is the discriminator for the different fields format and version.\nThere is currently only one possible value: \"FieldsV1\"" + }, + "fieldsV1": { + "$ref": "#/definitions/v1FieldsV1", + "title": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.\n+optional" } }, "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource\nthat the fieldset applies to." 
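
The Watch RPC added earlier in this patch is server-streaming, which grpc-gateway exposes at GET /api/v1/stream/workflows/{Namespace}/{WorkflowName} as a chunked response of newline-delimited JSON. A minimal consumer sketch follows; the server address and workflow coordinates are placeholders, and the {"result": ...} envelope is the usual grpc-gateway convention for streamed responses rather than anything this patch defines:

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder address and workflow coordinates; adjust per deployment.
	url := "http://localhost:8080/api/v1/stream/workflows/default/my-workflow"
	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// grpc-gateway writes one JSON object per line for streaming RPCs.
	scanner := bufio.NewScanner(resp.Body)
	scanner.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // Workflow objects can be large

	for scanner.Scan() {
		var event struct {
			Result json.RawMessage `json:"result"`
		}
		if err := json.Unmarshal(scanner.Bytes(), &event); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("workflow event: %s\n", event.Result)
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
```
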
@@ -1615,7 +1901,7 @@ }, "generateName": { "type": "string", - "description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency\n+optional" + "description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n+optional" }, "namespace": { "type": "string", @@ -1623,7 +1909,7 @@ }, "selfLink": { "type": "string", - "title": "SelfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n+optional" + "description": "SelfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n\nDEPRECATED\nKubernetes will stop propagating this field in 1.20 release and the field is planned\nto be removed in 1.21 release.\n+optional" }, "uid": { "type": "string", @@ -1631,7 +1917,7 @@ }, "resourceVersion": { "type": "string", - "description": "An opaque value that represents the internal version of this object that can\nbe used by clients to determine when objects have changed. May be used for optimistic\nconcurrency, change detection, and the watch operation on a resource or set of resources.\nClients must treat these values as opaque and passed unmodified back to the server.\nThey may only be valid for a particular resource or set of resources.\n\nPopulated by the system.\nRead-only.\nValue must be treated as opaque by clients and .\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency\n+optional" + "description": "An opaque value that represents the internal version of this object that can\nbe used by clients to determine when objects have changed. 
May be used for optimistic\nconcurrency, change detection, and the watch operation on a resource or set of resources.\nClients must treat these values as opaque and passed unmodified back to the server.\nThey may only be valid for a particular resource or set of resources.\n\nPopulated by the system.\nRead-only.\nValue must be treated as opaque by clients and .\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional" }, "generation": { "type": "string", @@ -1640,11 +1926,11 @@ }, "creationTimestamp": { "$ref": "#/definitions/v1Time", - "description": "CreationTimestamp is a timestamp representing the server time when this object was\ncreated. It is not guaranteed to be set in happens-before order across separate operations.\nClients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system.\nRead-only.\nNull for lists.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata\n+optional" + "description": "CreationTimestamp is a timestamp representing the server time when this object was\ncreated. It is not guaranteed to be set in happens-before order across separate operations.\nClients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system.\nRead-only.\nNull for lists.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n+optional" }, "deletionTimestamp": { "$ref": "#/definitions/v1Time", - "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This\nfield is set by the server when a graceful deletion is requested by the user, and is not\ndirectly settable by a client. The resource is expected to be deleted (no longer visible\nfrom resource lists, and not reachable by name) after the time in this field, once the\nfinalizers list is empty. As long as the finalizers list contains items, deletion is blocked.\nOnce the deletionTimestamp is set, this value may not be unset or be set further into the\nfuture, although it may be shortened or the resource may be deleted prior to this time.\nFor example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react\nby sending a graceful termination signal to the containers in the pod. After that 30 seconds,\nthe Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,\nremove the pod from the API. In the presence of network partitions, this object may still\nexist after this timestamp, until an administrator or automated process can determine the\nresource is fully terminated.\nIf not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata\n+optional" + "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This\nfield is set by the server when a graceful deletion is requested by the user, and is not\ndirectly settable by a client. The resource is expected to be deleted (no longer visible\nfrom resource lists, and not reachable by name) after the time in this field, once the\nfinalizers list is empty. 
As long as the finalizers list contains items, deletion is blocked.\nOnce the deletionTimestamp is set, this value may not be unset or be set further into the\nfuture, although it may be shortened or the resource may be deleted prior to this time.\nFor example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react\nby sending a graceful termination signal to the containers in the pod. After that 30 seconds,\nthe Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,\nremove the pod from the API. In the presence of network partitions, this object may still\nexist after this timestamp, until an administrator or automated process can determine the\nresource is fully terminated.\nIf not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n+optional" }, "deletionGracePeriodSeconds": { "type": "string", @@ -1672,10 +1958,6 @@ }, "title": "List of objects depended by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n+optional\n+patchMergeKey=uid\n+patchStrategy=merge" }, - "initializers": { - "$ref": "#/definitions/v1Initializers", - "description": "An initializer is a controller which enforces some system invariant at object creation time.\nThis field is a list of initializers that have not yet acted on this object. If nil or empty,\nthis object has been completely initialized. Otherwise, the object is considered uninitialized\nand is hidden (in list/watch and get calls) from clients that haven't explicitly asked to\nobserve uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers.\nOnly privileged users may set or modify this list. Once it is empty, it may not be modified further\nby any user.\n\nDEPRECATED - initializers are an alpha field and will be removed in v1.15." - }, "finalizers": { "type": "array", "items": { @@ -1692,7 +1974,7 @@ "items": { "$ref": "#/definitions/v1ManagedFieldsEntry" }, - "description": "ManagedFields maps workflow-id and version to the set of fields\nthat are managed by that workflow. This is mostly for internal\nhousekeeping, and users typically shouldn't need to set or\nunderstand this field. A workflow can be the user's name, a\ncontroller's name, or the name of a specific apply path like\n\"ci-cd\". The set of fields is always in the version that the\nworkflow used when modifying the object.\n\nThis field is alpha and can be changed or removed without notice.\n\n+optional" + "description": "ManagedFields maps workflow-id and version to the set of fields\nthat are managed by that workflow. This is mostly for internal\nhousekeeping, and users typically shouldn't need to set or\nunderstand this field. A workflow can be the user's name, a\ncontroller's name, or the name of a specific apply path like\n\"ci-cd\". The set of fields is always in the version that the\nworkflow used when modifying the object.\n\n+optional" } }, "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects\nusers must create." 
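The generateName contract described above is what server-side submission relies on later in this series. A short sketch of the client-side behavior, assuming the typed client from this patch; the generated suffix shown in the comment is illustrative:

import (
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
	"github.com/argoproj/argo/pkg/client/clientset/versioned"
)

func submitWithGenerateName(wfClient *versioned.Clientset, namespace string) (*v1alpha1.Workflow, error) {
	wf := &v1alpha1.Workflow{
		// Name is left empty, so the server appends a unique suffix to GenerateName.
		ObjectMeta: v1.ObjectMeta{GenerateName: "hello-world-"},
	}
	created, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Create(wf)
	if err != nil {
		return nil, err
	}
	// created.Name now carries the server-chosen suffix, e.g. "hello-world-x7k2p".
	return created, nil
}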
@@ -1706,7 +1988,7 @@ }, "kind": { "type": "string", - "title": "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + "title": "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" }, "name": { "type": "string", @@ -1734,7 +2016,7 @@ "properties": { "metadata": { "$ref": "#/definitions/v1ObjectMeta", - "title": "Standard object's metadata.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata\n+optional" + "title": "Standard object's metadata.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n+optional" }, "spec": { "$ref": "#/definitions/v1PersistentVolumeClaimSpec", @@ -1974,6 +2256,50 @@ }, "description": "PodDNSConfigOption defines DNS resolver options of a pod." }, + "v1PodLogOptions": { + "type": "object", + "properties": { + "container": { + "type": "string", + "title": "The container for which to stream logs. Defaults to only container if there is one container in the pod.\n+optional" + }, + "follow": { + "type": "boolean", + "format": "boolean", + "title": "Follow the log stream of the pod. Defaults to false.\n+optional" + }, + "previous": { + "type": "boolean", + "format": "boolean", + "title": "Return previous terminated container logs. Defaults to false.\n+optional" + }, + "sinceSeconds": { + "type": "string", + "format": "int64", + "title": "A relative time in seconds before the current time from which to show logs. If this value\nprecedes the time a pod was started, only logs since the pod start will be returned.\nIf this value is in the future, no logs will be returned.\nOnly one of sinceSeconds or sinceTime may be specified.\n+optional" + }, + "sinceTime": { + "$ref": "#/definitions/v1Time", + "title": "An RFC3339 timestamp from which to show logs. If this value\nprecedes the time a pod was started, only logs since the pod start will be returned.\nIf this value is in the future, no logs will be returned.\nOnly one of sinceSeconds or sinceTime may be specified.\n+optional" + }, + "timestamps": { + "type": "boolean", + "format": "boolean", + "title": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line\nof log output. Defaults to false.\n+optional" + }, + "tailLines": { + "type": "string", + "format": "int64", + "title": "If set, the number of lines from the end of the logs to show. If not specified,\nlogs are shown from the creation of the container or sinceSeconds or sinceTime\n+optional" + }, + "limitBytes": { + "type": "string", + "format": "int64", + "title": "If set, the number of bytes to read from the server before terminating the\nlog output. This may not display a complete final line of logging, and may return\nslightly more or slightly less than the specified limit.\n+optional" + } + }, + "description": "PodLogOptions is the query options for a Pod's logs REST call." + }, "v1PodSecurityContext": { "type": "object", "properties": { @@ -1981,6 +2307,10 @@ "$ref": "#/definitions/v1SELinuxOptions", "title": "The SELinux context to be applied to all containers.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in SecurityContext. 
If set in\nboth SecurityContext and PodSecurityContext, the value specified in SecurityContext\ntakes precedence for that container.\n+optional" }, + "windowsOptions": { + "$ref": "#/definitions/v1WindowsSecurityContextOptions", + "title": "The Windows specific settings applied to all containers.\nIf unspecified, the options within a container's SecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, "runAsUser": { "type": "string", "format": "int64", @@ -2079,7 +2409,7 @@ "successThreshold": { "type": "integer", "format": "int32", - "title": "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness. Minimum value is 1.\n+optional" + "title": "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1.\n+optional" }, "failureThreshold": { "type": "integer", @@ -2146,11 +2476,11 @@ "items": { "type": "string" }, - "title": "A collection of Ceph monitors.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it" + "title": "A collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" }, "image": { "type": "string", - "title": "The rados image name.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it" + "title": "The rados image name.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" }, "fsType": { "type": "string", @@ -2158,24 +2488,24 @@ }, "pool": { "type": "string", - "title": "The rados pool name.\nDefault is rbd.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it\n+optional" + "title": "The rados pool name.\nDefault is rbd.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" }, "user": { "type": "string", - "title": "The rados user name.\nDefault is admin.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it\n+optional" + "title": "The rados user name.\nDefault is admin.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" }, "keyring": { "type": "string", - "title": "Keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it\n+optional" + "title": "Keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" }, "secretRef": { "$ref": "#/definitions/v1LocalObjectReference", - "title": "SecretRef is name of the authentication secret for RBDUser. If provided\noverrides keyring.\nDefault is nil.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it\n+optional" + "title": "SecretRef is name of the authentication secret for RBDUser. 
If provided\noverrides keyring.\nDefault is nil.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" }, "readOnly": { "type": "boolean", "format": "boolean", - "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it\n+optional" + "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" } }, "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod.\nRBD volumes support ownership management and SELinux relabeling." @@ -2317,7 +2647,7 @@ "optional": { "type": "boolean", "format": "boolean", - "title": "Specify whether the Secret or it's key must be defined\n+optional" + "title": "Specify whether the Secret or its key must be defined\n+optional" } }, "description": "SecretKeySelector selects a key of a Secret." @@ -2365,7 +2695,7 @@ "optional": { "type": "boolean", "format": "boolean", - "title": "Specify whether the Secret or it's keys must be defined\n+optional" + "title": "Specify whether the Secret or its keys must be defined\n+optional" } }, "description": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume\nas files using the keys in the Data field as the file names.\nSecret volumes support ownership management and SELinux relabeling." @@ -2386,6 +2716,10 @@ "$ref": "#/definitions/v1SELinuxOptions", "title": "The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" }, + "windowsOptions": { + "$ref": "#/definitions/v1WindowsSecurityContextOptions", + "title": "The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, "runAsUser": { "type": "string", "format": "int64", @@ -2437,89 +2771,6 @@ }, "description": "ServiceAccountTokenProjection represents a projected service account token\nvolume. This projection can be used to insert a service account token into\nthe pods runtime filesystem for use against APIs (Kubernetes API Server or\notherwise)." }, - "v1Status": { - "type": "object", - "properties": { - "metadata": { - "$ref": "#/definitions/v1ListMeta", - "title": "Standard list metadata.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\n+optional" - }, - "status": { - "type": "string", - "title": "Status of the operation.\nOne of: \"Success\" or \"Failure\".\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status\n+optional" - }, - "message": { - "type": "string", - "title": "A human-readable description of the status of this operation.\n+optional" - }, - "reason": { - "type": "string", - "title": "A machine-readable description of why this operation is in the\n\"Failure\" status. If this value is empty there\nis no information available. 
A Reason clarifies an HTTP status\ncode but does not override it.\n+optional" - }, - "details": { - "$ref": "#/definitions/v1StatusDetails", - "title": "Extended data associated with the reason. Each reason may define its\nown extended details. This field is optional and the data returned\nis not guaranteed to conform to any schema except that defined by\nthe reason type.\n+optional" - }, - "code": { - "type": "integer", - "format": "int32", - "title": "Suggested HTTP return code for this status, 0 if not set.\n+optional" - } - }, - "description": "Status is a return value for calls that don't return other objects." - }, - "v1StatusCause": { - "type": "object", - "properties": { - "reason": { - "type": "string", - "title": "A machine-readable description of the cause of the error. If this value is\nempty there is no information available.\n+optional" - }, - "message": { - "type": "string", - "title": "A human-readable description of the cause of the error. This field may be\npresented as-is to a reader.\n+optional" - }, - "field": { - "type": "string", - "description": "The field of the resource that has caused this error, as named by its JSON\nserialization. May include dot and postfix notation for nested attributes.\nArrays are zero-indexed. Fields may appear more than once in an array of\ncauses due to fields having multiple errors.\nOptional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"\n+optional" - } - }, - "description": "StatusCause provides more information about an api.Status failure, including\ncases when multiple errors are encountered." - }, - "v1StatusDetails": { - "type": "object", - "properties": { - "name": { - "type": "string", - "title": "The name attribute of the resource associated with the status StatusReason\n(when there is a single name which can be described).\n+optional" - }, - "group": { - "type": "string", - "title": "The group attribute of the resource associated with the status StatusReason.\n+optional" - }, - "kind": { - "type": "string", - "title": "The kind attribute of the resource associated with the status StatusReason.\nOn some operations may differ from the requested resource Kind.\nMore info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\n+optional" - }, - "uid": { - "type": "string", - "title": "UID of the resource.\n(when there is a single resource which can be described).\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids\n+optional" - }, - "causes": { - "type": "array", - "items": { - "$ref": "#/definitions/v1StatusCause" - }, - "title": "The Causes array includes more details associated with the StatusReason\nfailure. Not all StatusReasons may provide detailed causes.\n+optional" - }, - "retryAfterSeconds": { - "type": "integer", - "format": "int32", - "title": "If specified, the time in seconds before the operation should be retried. Some errors may indicate\nthe client must take an alternate action - for those errors this field may indicate how long to wait\nbefore taking the alternate action.\n+optional" - } - }, - "description": "StatusDetails is a set of additional properties that MAY be set by the\nserver to provide additional information about a response. The Reason\nfield of a Status object defines what attributes will be set. Clients\nmust ignore fields that do not match the defined type of each attribute,\nand should assume that any attribute may be empty, invalid, or under\ndefined." 
- }, "v1StorageOSVolumeSource": { "type": "object", "properties": { @@ -2690,7 +2941,7 @@ }, "subPathExpr": { "type": "string", - "title": "Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive.\nThis field is alpha in 1.14.\n+optional" + "title": "Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive.\nThis field is beta in 1.15.\n+optional" } }, "description": "VolumeMount describes a mounting of a Volume within a container." @@ -2750,11 +3001,11 @@ }, "iscsi": { "$ref": "#/definitions/v1ISCSIVolumeSource", - "title": "ISCSI represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md\n+optional" + "title": "ISCSI represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://examples.k8s.io/volumes/iscsi/README.md\n+optional" }, "glusterfs": { "$ref": "#/definitions/v1GlusterfsVolumeSource", - "title": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md\n+optional" + "title": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md\n+optional" }, "persistentVolumeClaim": { "$ref": "#/definitions/v1PersistentVolumeClaimVolumeSource", @@ -2762,7 +3013,7 @@ }, "rbd": { "$ref": "#/definitions/v1RBDVolumeSource", - "title": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md\n+optional" + "title": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/rbd/README.md\n+optional" }, "flexVolume": { "$ref": "#/definitions/v1FlexVolumeSource", @@ -2770,7 +3021,7 @@ }, "cinder": { "$ref": "#/definitions/v1CinderVolumeSource", - "title": "Cinder represents a cinder volume attached and mounted on kubelets host machine\nMore info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md\n+optional" + "title": "Cinder represents a cinder volume attached and mounted on kubelets host machine.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" }, "cephfs": { "$ref": "#/definitions/v1CephFSVolumeSource", @@ -2872,6 +3123,24 @@ }, "title": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)" }, + "v1WindowsSecurityContextOptions": { + "type": "object", + "properties": { + "gmsaCredentialSpecName": { + "type": "string", + "title": "GMSACredentialSpecName is the name of the GMSA credential spec to use.\nThis field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.\n+optional" + }, + "gmsaCredentialSpec": { + "type": "string", + "title": "GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines 
the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field.\nThis field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.\n+optional" + }, + "runAsUserName": { + "type": "string", + "title": "The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nThis field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.\n+optional" + } + }, + "description": "WindowsSecurityContextOptions contain Windows-specific options and credentials." + }, "v1alpha1ArchiveStrategy": { "type": "object", "properties": { @@ -2892,14 +3161,14 @@ "items": { "$ref": "#/definitions/v1alpha1Parameter" }, - "title": "Parameters is the list of parameters to pass to the template or workflow" + "title": "Parameters is the list of parameters to pass to the template or workflow\n+patchStrategy=merge\n+patchMergeKey=name" }, "artifacts": { "type": "array", "items": { "$ref": "#/definitions/v1alpha1Artifact" }, - "title": "Artifacts is the list of artifacts to pass to the template or workflow" + "title": "Artifacts is the list of artifacts to pass to the template or workflow\n+patchStrategy=merge\n+patchMergeKey=name" } }, "title": "Arguments to a template" @@ -3097,7 +3366,7 @@ "items": { "$ref": "#/definitions/v1alpha1DAGTask" }, - "title": "Tasks are a list of DAG tasks" + "title": "Tasks are a list of DAG tasks\n+patchStrategy=merge\n+patchMergeKey=name" }, "failFast": { "type": "boolean", @@ -3107,6 +3376,16 @@ }, "title": "DAGTemplate is a template subtype for directed acyclic graph templates" }, + "v1alpha1ExecutorConfig": { + "type": "object", + "properties": { + "serviceAccountName": { + "type": "string", + "description": "ServiceAccountName specifies the service account name of the executor container." + } + }, + "description": "ExecutorConfig holds configurations of an executor container." 
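ExecutorConfig above pairs with the automountServiceAccountToken flag added to the template and workflow specs below: when token automounting is disabled, the executor must be given its own service account. A sketch of the intended spec shape, assuming the Go field names mirror these swagger definitions; the account name is hypothetical:

import "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"

func specWithExecutorServiceAccount() v1alpha1.WorkflowSpec {
	automount := false
	return v1alpha1.WorkflowSpec{
		// With automounting off, ServiceAccountName of the executor
		// must be set, per the constraint in the field description.
		AutomountServiceAccountToken: &automount,
		Executor: &v1alpha1.ExecutorConfig{
			ServiceAccountName: "workflow-executor", // hypothetical account
		},
	}
}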
+ }, "v1alpha1GitArtifact": { "type": "object", "properties": { @@ -3236,14 +3515,14 @@ "items": { "$ref": "#/definitions/v1alpha1Parameter" }, - "title": "Parameters are a list of parameters passed as inputs" + "title": "Parameters are a list of parameters passed as inputs\n+patchStrategy=merge\n+patchMergeKey=name" }, "artifacts": { "type": "array", "items": { "$ref": "#/definitions/v1alpha1Artifact" }, - "title": "Artifact are a list of artifacts passed as inputs" + "title": "Artifact are a list of artifacts passed as inputs\n+patchStrategy=merge\n+patchMergeKey=name" } }, "title": "Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another" @@ -3344,14 +3623,14 @@ "items": { "$ref": "#/definitions/v1alpha1Parameter" }, - "title": "Parameters holds the list of output parameters produced by a step" + "title": "Parameters holds the list of output parameters produced by a step\n+patchStrategy=merge\n+patchMergeKey=name" }, "artifacts": { "type": "array", "items": { "$ref": "#/definitions/v1alpha1Artifact" }, - "title": "Artifacts holds the list of output artifacts produced by a step" + "title": "Artifacts holds the list of output artifacts produced by a step\n+patchStrategy=merge\n+patchMergeKey=name" }, "result": { "type": "string", @@ -3498,6 +3777,10 @@ "secretKeySecret": { "$ref": "#/definitions/v1SecretKeySelector", "title": "SecretKeySecret is the secret selector to the bucket's secret key" + }, + "roleARN": { + "type": "string", + "description": "RoleARN is the Amazon Resource Name (ARN) of the role to assume." } }, "title": "S3Bucket contains the access information required for interfacing with an S3 bucket" @@ -3624,21 +3907,21 @@ "items": { "$ref": "#/definitions/v1Volume" }, - "description": "Volumes is a list of volumes that can be mounted by containers in a template." + "title": "Volumes is a list of volumes that can be mounted by containers in a template.\n+patchStrategy=merge\n+patchMergeKey=name" }, "initContainers": { "type": "array", "items": { "$ref": "#/definitions/v1alpha1UserContainer" }, - "description": "InitContainers is a list of containers which run before the main container." + "title": "InitContainers is a list of containers which run before the main container.\n+patchStrategy=merge\n+patchMergeKey=name" }, "sidecars": { "type": "array", "items": { "$ref": "#/definitions/v1alpha1UserContainer" }, - "title": "Sidecars is a list of containers which run alongside the main container\nSidecars are automatically killed when the main container completes" + "title": "Sidecars is a list of containers which run alongside the main container\nSidecars are automatically killed when the main container completes\n+patchStrategy=merge\n+patchMergeKey=name" }, "archiveLocation": { "$ref": "#/definitions/v1alpha1ArtifactLocation", @@ -3663,7 +3946,7 @@ "items": { "$ref": "#/definitions/v1Toleration" }, - "description": "Tolerations to apply to workflow pods." + "title": "Tolerations to apply to workflow pods.\n+patchStrategy=merge\n+patchMergeKey=key" }, "schedulerName": { "type": "string", @@ -3682,16 +3965,29 @@ "type": "string", "title": "ServiceAccountName to apply to workflow pods" }, + "automountServiceAccountToken": { + "type": "boolean", + "format": "boolean", + "description": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods.\nServiceAccountName of ExecutorConfig must be specified if this value is false." 
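Many list fields in this hunk gain +patchStrategy=merge and +patchMergeKey=name, which tells the API machinery to merge patched lists element by element, keyed on name, instead of replacing them wholesale. A self-contained sketch of that merge behavior using apimachinery's strategicpatch package; Param and Spec here are stand-ins for the annotated generated types, not the real ones:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

// Param mimics a name-keyed list element such as v1alpha1.Parameter.
type Param struct {
	Name  string `json:"name"`
	Value string `json:"value,omitempty"`
}

// Spec mimics a field carrying patchStrategy=merge / patchMergeKey=name tags.
type Spec struct {
	Parameters []Param `json:"parameters,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
}

func main() {
	original := []byte(`{"parameters":[{"name":"a","value":"1"},{"name":"b","value":"2"}]}`)
	patch := []byte(`{"parameters":[{"name":"b","value":"20"}]}`)
	merged, err := strategicpatch.StrategicMergePatch(original, patch, Spec{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged)) // "b" is updated in place and "a" is preserved
}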
+ }, + "executor": { + "$ref": "#/definitions/v1alpha1ExecutorConfig", + "description": "Executor holds configurations of the executor container." + }, "hostAliases": { "type": "array", "items": { "$ref": "#/definitions/v1HostAlias" }, - "title": "HostAliases is an optional list of hosts and IPs that will be injected into the pod spec" + "title": "HostAliases is an optional list of hosts and IPs that will be injected into the pod spec\n+patchStrategy=merge\n+patchMergeKey=ip" }, "securityContext": { "$ref": "#/definitions/v1PodSecurityContext", "title": "SecurityContext holds pod-level security attributes and common container settings.\nOptional: Defaults to empty. See type description for default values of each field.\n+optional" + }, + "podSpecPatch": { + "type": "string", + "description": "PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of\ncontainer fields which are not strings (e.g. resource limits)." } }, "title": "Template is a reusable and composable unit of execution in a workflow" @@ -3789,7 +4085,7 @@ "items": { "$ref": "#/definitions/v1alpha1Template" }, - "title": "Templates is a list of workflow templates used in a workflow" + "title": "Templates is a list of workflow templates used in a workflow\n+patchStrategy=merge\n+patchMergeKey=name" }, "entrypoint": { "type": "string", @@ -3803,19 +4099,28 @@ "type": "string", "description": "ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as." }, + "automountServiceAccountToken": { + "type": "boolean", + "format": "boolean", + "description": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods.\nServiceAccountName of ExecutorConfig must be specified if this value is false." + }, + "executor": { + "$ref": "#/definitions/v1alpha1ExecutorConfig", + "description": "Executor holds configurations of executor containers of the workflow." + }, "volumes": { "type": "array", "items": { "$ref": "#/definitions/v1Volume" }, - "description": "Volumes is a list of volumes that can be mounted by containers in a workflow." + "title": "Volumes is a list of volumes that can be mounted by containers in a workflow.\n+patchStrategy=merge\n+patchMergeKey=name" }, "volumeClaimTemplates": { "type": "array", "items": { "$ref": "#/definitions/v1PersistentVolumeClaim" }, - "title": "VolumeClaimTemplates is a list of claims that containers are allowed to reference.\nThe Workflow controller will create the claims at the beginning of the workflow\nand delete the claims upon completion of the workflow" + "title": "VolumeClaimTemplates is a list of claims that containers are allowed to reference.\nThe Workflow controller will create the claims at the beginning of the workflow\nand delete the claims upon completion of the workflow\n+patchStrategy=merge\n+patchMergeKey=name" }, "parallelism": { "type": "string", @@ -3847,14 +4152,14 @@ "items": { "$ref": "#/definitions/v1Toleration" }, - "description": "Tolerations to apply to workflow pods." + "title": "Tolerations to apply to workflow pods.\n+patchStrategy=merge\n+patchMergeKey=key" }, "imagePullSecrets": { "type": "array", "items": { "$ref": "#/definitions/v1LocalObjectReference" }, - "title": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images\nin pods that reference this ServiceAccount. 
ImagePullSecrets are distinct from Secrets because Secrets\ncan be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.\nMore info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod" + "title": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images\nin pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets\ncan be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.\nMore info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod\n+patchStrategy=merge\n+patchMergeKey=name" }, "hostNetwork": { "type": "boolean", @@ -3893,8 +4198,7 @@ "title": "Set scheduler name for all pods.\nWill be overridden if container/script template's scheduler name is set.\nDefault scheduler will be used if neither specified.\n+optional" }, "podGC": { - "$ref": "#/definitions/v1alpha1PodGC", - "title": "PodGC describes the strategy to use when to deleting completed pods" + "$ref": "#/definitions/v1alpha1PodGC" }, "podPriorityClassName": { "type": "string", @@ -3910,11 +4214,14 @@ "items": { "$ref": "#/definitions/v1HostAlias" }, - "title": "HostAliases is an optional list of hosts and IPs that will be injected into the pod spec" + "title": "+patchStrategy=merge\n+patchMergeKey=ip" }, "securityContext": { - "$ref": "#/definitions/v1PodSecurityContext", - "title": "SecurityContext holds pod-level security attributes and common container settings.\nOptional: Defaults to empty. See type description for default values of each field.\n+optional" + "$ref": "#/definitions/v1PodSecurityContext" + }, + "podSpecPatch": { + "type": "string", + "description": "PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of\ncontainer fields which are not strings (e.g. resource limits)." } }, "description": "WorkflowSpec is the specification of a Workflow." @@ -3949,6 +4256,13 @@ }, "description": "Nodes is a mapping between a node ID and the node's status." }, + "storedTemplates": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/v1alpha1Template" + }, + "description": "StoredTemplates is a mapping between a template ref and the node's status." 
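podSpecPatch, added in this hunk at both the template and the workflow level, exists because ordinary template fields can only parameterize strings; a strategic merge patch against the generated pod spec can reach typed fields such as resource limits. A hedged sketch, with an illustrative parameter name:

import "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"

func withMemoryLimitParameter(wf *v1alpha1.Workflow) {
	// The patch is merged into the pod spec at pod-creation time, so a
	// workflow parameter can flow into a non-string field like a memory limit.
	wf.Spec.PodSpecPatch = `{"containers":[{"name":"main","resources":{"limits":{"memory":"{{workflow.parameters.mem-limit}}"}}}]}`
}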
+ }, "persistentVolumeClaims": { "type": "array", "items": { @@ -3961,7 +4275,7 @@ "title": "Outputs captures output values and artifact locations produced by the workflow via global outputs" } }, - "title": "WorkflowStatus contains overall status information about a workflow\n+k8s:openapi-gen=false" + "title": "WorkflowStatus contains overall status information about a workflow" }, "v1alpha1WorkflowStep": { "type": "object", @@ -4019,22 +4333,69 @@ } } }, + "workflowSubmitOptions": { + "type": "object", + "properties": { + "Name": { + "type": "string" + }, + "GenerateName": { + "type": "string" + }, + "InstanceID": { + "type": "string" + }, + "Entrypoint": { + "type": "string" + }, + "Parameters": { + "type": "array", + "items": { + "type": "string" + } + }, + "ServiceAccount": { + "type": "string" + }, + "ServerDryRun": { + "type": "boolean", + "format": "boolean" + }, + "Labels": { + "type": "string" + }, + "OwnerReference": { + "$ref": "#/definitions/v1OwnerReference" + } + } + }, "workflowWorkflowCreateRequest": { "type": "object", "properties": { "Namespace": { "type": "string" }, - "Workflows": { + "Workflow": { "$ref": "#/definitions/v1alpha1Workflow" }, "CreateOptions": { "$ref": "#/definitions/v1CreateOptions" + }, + "SubmitOptions": { + "$ref": "#/definitions/workflowSubmitOptions" } } }, "workflowWorkflowDeleteResponse": { - "type": "object" + "type": "object", + "properties": { + "WorkflowName": { + "type": "string" + }, + "Status": { + "type": "string" + } + } }, "workflowWorkflowUpdateRequest": { "type": "object", @@ -4078,6 +4439,14 @@ "$ref": "#/definitions/v1alpha1TemplateRef", "title": "TemplateRef is the reference to the template resource which this node corresponds to.\nNot applicable to virtual nodes (e.g. Retry, StepGroup)" }, + "storedTemplateID": { + "type": "string", + "description": "StoredTemplateID is the ID of stored template." + }, + "workflowTemplateName": { + "type": "string", + "description": "WorkflowTemplateName is the WorkflowTemplate resource name on which the resolved template of this node is retrieved." + }, "phase": { "type": "string", "description": "Phase a simple, high-level summary of where the node is in its lifecycle.\nCan be used as a state machine." @@ -4130,10 +4499,22 @@ "description": "OutboundNodes tracks the node IDs which are considered \"outbound\" nodes to a template invocation.\nFor every invocation of a template, there are nodes which we considered as \"outbound\". Essentially,\nthese are last nodes in the execution sequence to run, before the template is considered completed.\nThese nodes are then connected as parents to a following step.\n\nIn the case of single pod steps (i.e. container, script, resource templates), this list will be nil\nsince the pod itself is already considered the \"outbound\" node.\nIn the case of DAGs, outbound nodes are the \"target\" tasks (tasks with no children).\nIn the case of steps, outbound nodes are all the containers involved in the last step group.\nNOTE: since templates are composable, the list of outbound nodes are carried upwards when\na DAG/steps template invokes another DAG/steps template. In other words, the outbound nodes of\na template, will be a superset of the outbound nodes of its last children." 
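workflowSubmitOptions above is consumed by ApplyWorkflowOptions in the server code later in this patch: each Parameters entry must be a NAME=VALUE string (split on the first "="), and Labels is one comma-separated NAME=VALUE list handed to cmdutil.ParseLabels. A sketch of a create request, assuming the generated Go types follow these proto definitions; all values are illustrative:

import "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"

func buildCreateRequest(wf *v1alpha1.Workflow) *WorkflowCreateRequest {
	return &WorkflowCreateRequest{
		Namespace: "argo",
		Workflow:  wf,
		SubmitOptions: &SubmitOptions{
			GenerateName: "ci-",
			Parameters:   []string{"branch=master"}, // NAME=VALUE pairs
			Labels:       "team=platform,env=dev",   // comma-separated labels
			ServerDryRun: true,                      // validate without persisting
		},
	}
}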
} }, - "title": "NodeStatus contains status information about an individual node in the workflow\n+k8s:openapi-gen=false" + "title": "NodeStatus contains status information about an individual node in the workflow" } }, "x-stream-definitions": { + "v1alpha1Workflow": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/v1alpha1Workflow" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of v1alpha1Workflow" + }, "workflowLogEntry": { "type": "object", "properties": { diff --git a/cmd/server/workflow/workflow_db_service.go b/cmd/server/workflow/workflow_db_service.go index 3f0a73040c00..e99dccea33c3 100644 --- a/cmd/server/workflow/workflow_db_service.go +++ b/cmd/server/workflow/workflow_db_service.go @@ -58,7 +58,6 @@ func (db *DBService) List(namespace string, pageSize uint, lastId string) (*v1al if db.wfDBctx == nil { return nil, errors.New(errors.CodeInternal, "DB Context is not initialized") } - var wfList *v1alpha1.WorkflowList var err error @@ -79,3 +78,12 @@ func (db *DBService) List(namespace string, pageSize uint, lastId string) (*v1al return wfList, err } + +func (db *DBService) Delete(wfName string, namespace string) (error) { + if db.wfDBctx == nil { + return errors.New(errors.CodeInternal, "DB Context is not initialized") + } + cond := dblib.Cond{"name": wfName, "namespace": namespace} + + return db.wfDBctx.Delete(cond) +} \ No newline at end of file diff --git a/cmd/server/workflow/workflow_server.go b/cmd/server/workflow/workflow_server.go index a6ee26fdf83a..a2b9907cb526 100644 --- a/cmd/server/workflow/workflow_server.go +++ b/cmd/server/workflow/workflow_server.go @@ -1,21 +1,29 @@ package workflow import ( + "bufio" "encoding/json" "errors" "fmt" - "github.com/argoproj/argo/persist/sqldb" - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo/pkg/client/clientset/versioned" - wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" - "github.com/argoproj/argo/workflow/config" - "github.com/argoproj/argo/workflow/util" + "strings" + log "github.com/sirupsen/logrus" "golang.org/x/net/context" "google.golang.org/grpc/metadata" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + + "github.com/argoproj/argo/persist/sqldb" + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/pkg/client/clientset/versioned" + wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" + cmdutil "github.com/argoproj/argo/util/cmd" + "github.com/argoproj/argo/workflow/common" + "github.com/argoproj/argo/workflow/config" + "github.com/argoproj/argo/workflow/util" + "github.com/argoproj/argo/workflow/validate" ) type WorkflowServer struct { @@ -23,19 +31,18 @@ type WorkflowServer struct { WfClientset *versioned.Clientset KubeClientset *kubernetes.Clientset EnableClientAuth bool - Config *config.WorkflowControllerConfig + Config *config.WorkflowControllerConfig WfDBService *DBService WfKubeService *KubeService } - - -func NewWorkflowServer(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset, config *config.WorkflowControllerConfig, enableClientAuth bool) (*WorkflowServer) { +func NewWorkflowServer(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset, config *config.WorkflowControllerConfig, enableClientAuth bool) *WorkflowServer { wfServer := WorkflowServer{Namespace: namespace, WfClientset: 
wfClientset, KubeClientset: kubeClientSet, EnableClientAuth: enableClientAuth} var err error - wfServer.WfDBService.wfDBctx, err = wfServer.CreatePersistenceContext(namespace, kubeClientSet,config.Persistence) - + if config != nil && config.Persistence != nil { + wfServer.WfDBService.wfDBctx, err = wfServer.CreatePersistenceContext(namespace, kubeClientSet, config.Persistence) + } if err != nil { log.Errorf("Error Creating DB Context. %v", err) return nil @@ -43,7 +50,6 @@ func NewWorkflowServer(namespace string, wfClientset *versioned.Clientset, kubeC return &wfServer } - func (s *WorkflowServer) CreatePersistenceContext(namespace string, kubeClientSet *kubernetes.Clientset, config *config.PersistConfig) (*sqldb.WorkflowDBContext, error) { var wfDBCtx sqldb.WorkflowDBContext @@ -66,13 +72,13 @@ func (s *WorkflowServer) GetWFClient(ctx context.Context) (*versioned.Clientset, md, _ := metadata.FromIncomingContext(ctx) - if s.EnableClientAuth { + if !s.EnableClientAuth { return s.WfClientset, s.KubeClientset, nil } var restConfigStr, bearerToken string if len(md.Get(CLIENT_REST_CONFIG)) == 0 { - return nil,nil, errors.New("Client kubeconfig is not found") + return nil, nil, errors.New("Client kubeconfig is not found") } restConfigStr = md.Get(CLIENT_REST_CONFIG)[0] @@ -112,7 +118,27 @@ func (s *WorkflowServer) Create(ctx context.Context, in *WorkflowCreateRequest) if in.Namespace != "" { namespace = in.Namespace } - wf, err := s.WfKubeService.Create(wfClient,namespace,in.Workflows) + if in.Workflow == nil { + return nil, errors.New("Workflow body not found") + } + + in.Workflow.Namespace = namespace + + wf, err := s.ApplyWorkflowOptions(in.Workflow, in.SubmitOptions) + if err != nil { + return nil, err + } + + err = validate.ValidateWorkflow(wfClient, namespace, wf, validate.ValidateOpts{}) + if err != nil { + return nil, err + } + + if in.SubmitOptions != nil && in.SubmitOptions.ServerDryRun { + return util.CreateServerDryRun(wf, wfClient) + } + + wf, err = s.WfKubeService.Create(wfClient, namespace, in.Workflow) if err != nil { log.Warnf("Create request is failed. 
Error: %s", err) @@ -137,7 +163,7 @@ func (s *WorkflowServer) Get(ctx context.Context, in *WorkflowGetRequest) (*v1al if s.WfDBService != nil { wf, err = s.WfDBService.Get(in.WorkflowName, in.Namespace) - }else { + } else { wf, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) } if err != nil { @@ -162,20 +188,19 @@ func (s *WorkflowServer) List(ctx context.Context, in *WorkflowListRequest) (*v1 listOpt := in.ListOptions var wfList *v1alpha1.WorkflowList if s.WfDBService != nil { - wfList, err = s.WfDBService.List(namespace, uint(listOpt.Limit),"") - }else { - wfList, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).List(*listOpt) + wfList, err = s.WfDBService.List(namespace, uint(listOpt.Limit), "") + } else { + + wfList, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).List(v1.ListOptions{}) } if err != nil { - fmt.Println(err) + return nil, err } return wfList, nil } - - func (s *WorkflowServer) Delete(ctx context.Context, in *WorkflowDeleteRequest) (*WorkflowDeleteResponse, error) { namespace := s.Namespace @@ -187,13 +212,22 @@ func (s *WorkflowServer) Delete(ctx context.Context, in *WorkflowDeleteRequest) if err != nil { return nil, err } + if s.WfDBService != nil { + err = s.WfDBService.Delete(in.WorkflowName, in.Namespace) + if err != nil { + return nil, err + } + } err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Delete(in.WorkflowName, &v1.DeleteOptions{}) + if err != nil { - log.Fatal(err) return nil, err } - //msgStr := fmt.Sprint("Workflow '%s' deleted\n", in.WorkflowName) - return nil, nil + var rsp WorkflowDeleteResponse + rsp.WorkflowName = in.WorkflowName + rsp.Status = "Deleted" + + return &rsp, nil } func (s *WorkflowServer) Retry(ctx context.Context, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { @@ -309,4 +343,166 @@ func (s *WorkflowServer) Terminate(ctx context.Context, in *WorkflowUpdateReques return nil, err } return wf, nil -} \ No newline at end of file +} + +func (s *WorkflowServer) Lint(ctx context.Context, in *WorkflowCreateRequest) (*v1alpha1.Workflow, error) { + wfClient, _, err := s.GetWFClient(ctx) + namespace := s.Namespace + if in.Namespace != "" { + namespace = in.Namespace + } + + err = validate.ValidateWorkflow(wfClient, namespace, in.Workflow, validate.ValidateOpts{}) + if err != nil { + return nil, err + } + return in.Workflow, nil +} + +func (s *WorkflowServer) Watch(in *WorkflowGetRequest, ws WorkflowService_WatchServer) error { + namespace := s.Namespace + if in.Namespace != "" { + namespace = in.Namespace + } + wfClient, _, err := s.GetWFClient(ws.Context()) + + if err != nil { + return err + } + wfs, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Watch(v1.ListOptions{}) + if err != nil { + return err + } + done := make(chan bool) + go func() { + for next := range wfs.ResultChan() { + a := *next.Object.(*v1alpha1.Workflow) + if in.WorkflowName == "" || in.WorkflowName == a.Name { + + err = ws.Send(&a) + if err != nil { + log.Warnf("Unable to send stream message: %v", err) + } + } + } + done <- true + }() + select { + case <-ws.Context().Done(): + wfs.Stop() + case <-done: + wfs.Stop() + } + return nil +} + +func (s *WorkflowServer) PodLogs(in *WorkflowLogRequest, log WorkflowService_PodLogsServer) error { + + namespace := s.Namespace + if in.Namespace != "" { + namespace = in.Namespace + } + containerName := "main" + if in.Container != "" { + containerName = in.Container + } + _, kubeClient, err := s.GetWFClient(log.Context()) + + stream, err := 
kubeClient.CoreV1().Pods(namespace).GetLogs(in.PodName, &corev1.PodLogOptions{ + Container: containerName, + Follow: in.LogOptions.Follow, + Timestamps: true, + SinceSeconds: in.LogOptions.SinceSeconds, + SinceTime: in.LogOptions.SinceTime, + TailLines: in.LogOptions.TailLines, + }).Stream() + + if err == nil { + scanner := bufio.NewScanner(stream) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Split(line, " ") + //logTime, err := time.Parse(time.RFC3339, parts[0]) + byt := []byte(parts[0]) + var logTime v1.Time + err := logTime.UnmarshalText(byt) + if err == nil { + lines := strings.Join(parts[1:], " ") + for _, line := range strings.Split(lines, "\r") { + if line != "" { + cnt := LogEntry{Content: line, TimeStamp: &logTime} + log.Send(&cnt) + } + } + } else { + cnt := LogEntry{Content: line, TimeStamp: &logTime} + log.Send(&cnt) + } + } + } + return err +} + +func (s *WorkflowServer) ApplyWorkflowOptions(wf *v1alpha1.Workflow, opts *SubmitOptions) (*v1alpha1.Workflow, error) { + if opts == nil { + return wf, nil + } + if opts.Entrypoint != "" { + wf.Spec.Entrypoint = opts.Entrypoint + } + if opts.ServiceAccount != "" { + wf.Spec.ServiceAccountName = opts.ServiceAccount + } + labels := wf.GetLabels() + if labels == nil { + labels = make(map[string]string) + } + if opts.Labels != "" { + passedLabels, err := cmdutil.ParseLabels(opts.Labels) + if err != nil { + return nil, fmt.Errorf("Expected labels of the form: NAME1=VALUE2,NAME2=VALUE2. Received: %s", opts.Labels) + } + for k, v := range passedLabels { + labels[k] = v + } + } + if opts.InstanceID != "" { + labels[common.LabelKeyControllerInstanceID] = opts.InstanceID + } + wf.SetLabels(labels) + if len(opts.Parameters) > 0 { + newParams := make([]v1alpha1.Parameter, 0) + passedParams := make(map[string]bool) + for _, paramStr := range opts.Parameters { + parts := strings.SplitN(paramStr, "=", 2) + if len(parts) == 1 { + return nil, fmt.Errorf("Expected parameter of the form: NAME=VALUE. Received: %s", paramStr) + } + param := v1alpha1.Parameter{ + Name: parts[0], + Value: &parts[1], + } + newParams = append(newParams, param) + passedParams[param.Name] = true + } + + for _, param := range wf.Spec.Arguments.Parameters { + if _, ok := passedParams[param.Name]; ok { + // this parameter was overridden via command line + continue + } + newParams = append(newParams, param) + } + wf.Spec.Arguments.Parameters = newParams + } + if opts.GenerateName != "" { + wf.ObjectMeta.GenerateName = opts.GenerateName + } + if opts.Name != "" { + wf.ObjectMeta.Name = opts.Name + } + if opts.OwnerReference != nil { + wf.SetOwnerReferences(append(wf.GetOwnerReferences(), *opts.OwnerReference)) + } + return wf, nil +} diff --git a/cmd/server/workflow/workflow_service.go b/cmd/server/workflow/workflow_service.go index ac724a6128d2..6221b529de4a 100644 --- a/cmd/server/workflow/workflow_service.go +++ b/cmd/server/workflow/workflow_service.go @@ -180,7 +180,3 @@ func (s *KubeService) Terminate(wfClient *versioned.Clientset, namespace string, } return wf, nil } - -func (s *WorkflowServer) PodLogs(*WorkflowLogRequest, WorkflowService_PodLogsServer) error { - panic("implement me") -} \ No newline at end of file diff --git a/hack/generate-proto.sh b/hack/generate-proto.sh index b645ecc94bbb..f462235002db 100755 --- a/hack/generate-proto.sh +++ b/hack/generate-proto.sh @@ -45,77 +45,77 @@ go-to-protobuf \ # server/*/.pb.go from .proto files. golang/protobuf and gogo/protobuf can be used # interchangeably. The difference in the options are: # 1. 
protoc-gen-go - official golang/protobuf -#go build -i -o dist/protoc-gen-go ./vendor/github.com/golang/protobuf/protoc-gen-go -#GOPROTOBINARY=go +go build -i -o dist/protoc-gen-go ./vendor/github.com/golang/protobuf/protoc-gen-go +GOPROTOBINARY=go # 2. protoc-gen-gofast - fork of golang golang/protobuf. Faster code generation -#go build -i -o dist/protoc-gen-gofast ./vendor/github.com/gogo/protobuf/protoc-gen-gofast -#GOPROTOBINARY=gofast +go build -i -o dist/protoc-gen-gofast ./vendor/github.com/gogo/protobuf/protoc-gen-gofast +GOPROTOBINARY=gofast # 3. protoc-gen-gogofast - faster code generation and gogo extensions and flexibility in controlling # the generated go code (e.g. customizing field names, nullable fields) -#go build -i -o dist/protoc-gen-gogofast ./vendor/github.com/gogo/protobuf/protoc-gen-gogofast -#GOPROTOBINARY=gogofast +go build -i -o dist/protoc-gen-gogofast ./vendor/github.com/gogo/protobuf/protoc-gen-gogofast +GOPROTOBINARY=gogofast # ## protoc-gen-grpc-gateway is used to build .pb.gw.go files from .proto files -#go build -i -o dist/protoc-gen-grpc-gateway ./vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway +go build -i -o dist/protoc-gen-grpc-gateway ./vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway ## protoc-gen-swagger is used to build swagger.json -#go build -i -o dist/protoc-gen-swagger ./vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger +go build -i -o dist/protoc-gen-swagger ./vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger # ## Generate server/<service>/(<service>.pb.go|<service>.pb.gw.go) -#PROTO_FILES=$(find $PROJECT_ROOT \( -name "*.proto" -and -path '*/server/*' -or -path '*/reposerver/*' -and -name "*.proto" \)) -#for i in ${PROTO_FILES}; do -# # Path to the google API gateway annotations.proto will be different depending if we are -# # building natively (e.g. from workspace) vs. part of a docker build. -# if [ -f /.dockerenv ]; then -# GOOGLE_PROTO_API_PATH=$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis -# GOGO_PROTOBUF_PATH=$GOPATH/src/github.com/gogo/protobuf -# else -# GOOGLE_PROTO_API_PATH=${PROJECT_ROOT}/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis -# GOGO_PROTOBUF_PATH=${PROJECT_ROOT}/vendor/github.com/gogo/protobuf -# fi -# protoc \ -# -I${PROJECT_ROOT} \ -# -I/usr/local/include \ -# -I./vendor \ -# -I$GOPATH/src \ -# -I${GOOGLE_PROTO_API_PATH} \ -# -I${GOGO_PROTOBUF_PATH} \ -# --${GOPROTOBINARY}_out=plugins=grpc:$GOPATH/src \ -# --grpc-gateway_out=logtostderr=true:$GOPATH/src \ -# --swagger_out=logtostderr=true:. \ -# $i -#done -# -## collect_swagger gathers swagger files into a subdirectory -#collect_swagger() { -# SWAGGER_ROOT="$1" -# EXPECTED_COLLISIONS="$2" -# SWAGGER_OUT="${PROJECT_ROOT}/assets/swagger.json" -# PRIMARY_SWAGGER=`mktemp` -# COMBINED_SWAGGER=`mktemp` -# -# cat <<EOF > "${PRIMARY_SWAGGER}" -#{ -# "swagger": "2.0", -# "info": { -# "title": "Consolidate Services", -# "description": "Description of all APIs", -# "version": "version not set" -# }, -# "paths": {} -#} -#EOF -# -# /bin/rm -f "${SWAGGER_OUT}" -# -# /usr/bin/find "${SWAGGER_ROOT}" -name '*.swagger.json' -exec /usr/local/bin/swagger mixin -c "${EXPECTED_COLLISIONS}" "${PRIMARY_SWAGGER}" '{}' \+ > "${COMBINED_SWAGGER}" -# /usr/local/bin/jq -r 'del(.definitions[].properties[]? | select(."$ref"!=null and .description!=null).description) | del(.definitions[].properties[]? 
| select(."$ref"!=null and .title!=null).title)' "${COMBINED_SWAGGER}" > "${SWAGGER_OUT}" -# -# /bin/rm "${PRIMARY_SWAGGER}" "${COMBINED_SWAGGER}" -#} -# -## clean up generated swagger files (should come after collect_swagger) -#clean_swagger() { -# SWAGGER_ROOT="$1" -# /usr/bin/find "${SWAGGER_ROOT}" -name '*.swagger.json' -delete -#} -# +PROTO_FILES=$(find $PROJECT_ROOT \( -name "*.proto" -and -path '*/server/*' -or -path '*/reposerver/*' -and -name "*.proto" \)) +for i in ${PROTO_FILES}; do + # Path to the google API gateway annotations.proto will be different depending if we are + # building natively (e.g. from workspace) vs. part of a docker build. + if [ -f /.dockerenv ]; then + GOOGLE_PROTO_API_PATH=$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis + GOGO_PROTOBUF_PATH=$GOPATH/src/github.com/gogo/protobuf + else + GOOGLE_PROTO_API_PATH=${PROJECT_ROOT}/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis + GOGO_PROTOBUF_PATH=${PROJECT_ROOT}/vendor/github.com/gogo/protobuf + fi + protoc \ + -I${PROJECT_ROOT} \ + -I/usr/local/include \ + -I./vendor \ + -I$GOPATH/src \ + -I${GOOGLE_PROTO_API_PATH} \ + -I${GOGO_PROTOBUF_PATH} \ + --${GOPROTOBINARY}_out=plugins=grpc:$GOPATH/src \ + --grpc-gateway_out=logtostderr=true:$GOPATH/src \ + --swagger_out=logtostderr=true:. \ + $i +done + +# collect_swagger gathers swagger files into a subdirectory +collect_swagger() { + SWAGGER_ROOT="$1" + EXPECTED_COLLISIONS="$2" + SWAGGER_OUT="${PROJECT_ROOT}/assets/swagger.json" + PRIMARY_SWAGGER=`mktemp` + COMBINED_SWAGGER=`mktemp` + + cat <<EOF > "${PRIMARY_SWAGGER}" +{ + "swagger": "2.0", + "info": { + "title": "Consolidate Services", + "description": "Description of all APIs", + "version": "version not set" + }, + "paths": {} +} +EOF + + /bin/rm -f "${SWAGGER_OUT}" + + /usr/bin/find "${SWAGGER_ROOT}" -name '*.swagger.json' -exec /usr/local/bin/swagger mixin -c "${EXPECTED_COLLISIONS}" "${PRIMARY_SWAGGER}" '{}' \+ > "${COMBINED_SWAGGER}" + /usr/local/bin/jq -r 'del(.definitions[].properties[]? 
| select(."$ref"!=null and .title!=null).title)' "${COMBINED_SWAGGER}" > "${SWAGGER_OUT}" + + /bin/rm "${PRIMARY_SWAGGER}" "${COMBINED_SWAGGER}" +} + +# clean up generated swagger files (should come after collect_swagger) +clean_swagger() { + SWAGGER_ROOT="$1" + /usr/bin/find "${SWAGGER_ROOT}" -name '*.swagger.json' -delete +} + diff --git a/persist/sqldb/workflow_repository.go b/persist/sqldb/workflow_repository.go index 917444a7e46d..4c9f11ccf000 100644 --- a/persist/sqldb/workflow_repository.go +++ b/persist/sqldb/workflow_repository.go @@ -27,6 +27,7 @@ type ( Get(uid string) (*wfv1.Workflow, error) List(orderBy interface{}) (*wfv1.WorkflowList, error) Query(condition db.Cond, orderBy ...interface{}) ([]wfv1.Workflow, error) + Delete(condition db.Cond)(error) Close() error IsNodeStatusOffload() bool QueryWithPagination(condition db.Cond, pageSize uint, lastID string, orderBy ...interface{})(*wfv1.WorkflowList, error) @@ -242,4 +243,11 @@ func (wdc *WorkflowDBContext) QueryWithPagination(condition db.Cond, pageLimit u wfList.Items = wfs return &wfList, nil +} + +func (wdc *WorkflowDBContext) Delete(condition db.Cond)(error){ + if wdc.Session == nil { + return DBInvalidSession(nil, "DB session is not initialized") + } + return wdc.Session.Collection(wdc.TableName).Find(condition).Delete() } \ No newline at end of file From 5decb9049c796ebe84d0b2c49e8ba29c1056a8bd Mon Sep 17 00:00:00 2001 From: Simon Behar Date: Fri, 22 Nov 2019 10:48:44 -0800 Subject: [PATCH 009/421] Add dev makefiles and fix format issues --- Dockerfile | 10 +- Dockerfile.argo-server-dev | 6 + Gopkg.lock | 31 +++ Makefile | 16 ++ cmd/argo/commands/common.go | 5 - cmd/client/client.go | 126 ++-------- cmd/server/apiserver/argoserver.go | 145 +++++------ cmd/server/main.go | 37 +-- cmd/server/workflow/common.go | 7 +- cmd/server/workflow/workflow_db_service.go | 24 +- cmd/server/workflow/workflow_server.go | 247 ++++++++++--------- cmd/server/workflow/workflow_service.go | 109 ++++---- cmd/server/workflow/workflow_service_test.go | 7 +- persist/sqldb/workflow_repository.go | 121 +++++---- pkg/apis/workflow/v1alpha1/workflow_types.go | 4 +- util/util.go | 11 +- workflow/controller/operator.go | 3 - workflow/controller/workflowpod.go | 2 +- workflow/validate/validate_test.go | 5 +- 19 files changed, 459 insertions(+), 457 deletions(-) create mode 100644 Dockerfile.argo-server-dev diff --git a/Dockerfile b/Dockerfile index 9deee48326e6..3b9063364a62 100644 --- a/Dockerfile +++ b/Dockerfile @@ -75,7 +75,7 @@ RUN cd ${GOPATH}/src/dummy && \ # Perform the build WORKDIR /go/src/github.com/argoproj/argo COPY . . 
-ARG MAKE_TARGET="controller executor cli-linux-amd64" +ARG MAKE_TARGET="controller executor cli-linux-amd64 argo-server" RUN make $MAKE_TARGET @@ -100,3 +100,11 @@ ENTRYPOINT [ "workflow-controller" ] FROM scratch as argocli COPY --from=argo-build /go/src/github.com/argoproj/argo/dist/argo-linux-amd64 /bin/argo ENTRYPOINT [ "argo" ] + + +#################################################################################################### +# argo-server +#################################################################################################### +FROM scratch as argo-server +COPY --from=argo-build /go/src/github.com/argoproj/argo/dist/argo-server /bin/argo-server +ENTRYPOINT [ "argo-server" ] diff --git a/Dockerfile.argo-server-dev b/Dockerfile.argo-server-dev new file mode 100644 index 000000000000..4e19de350b54 --- /dev/null +++ b/Dockerfile.argo-server-dev @@ -0,0 +1,6 @@ +#################################################################################################### +# argo-server-dev +#################################################################################################### +FROM scratch +COPY argo-server /bin/ +ENTRYPOINT [ "argo-server" ] diff --git a/Gopkg.lock b/Gopkg.lock index 47dafcc4281d..c190ca2aea4d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -55,6 +55,25 @@ pruneopts = "" revision = "de5bf2ad457846296e2031421a34e2568e304e35" +[[projects]] + branch = "master" + digest = "1:a7f619ffc7b99687f9444bd0a07509fec8ae708a7175d234878129896c0918a8" + name = "github.com/alecthomas/template" + packages = [ + ".", + "parse", + ] + pruneopts = "" + revision = "fb15b899a75114aa79cc930e33c46b577cc664b1" + +[[projects]] + branch = "master" + digest = "1:9d943843b71c5d44f184893fcdbe419bf639fee8647ceeca4c7d4fd95923721c" + name = "github.com/alecthomas/units" + packages = ["."] + pruneopts = "" + revision = "f65c72e2690dc4b403c8bd637baf4611cd4c069b" + [[projects]] branch = "master" digest = "1:52905b00a73cda93a2ce8c5fa35185daed673d59e39576e81ad6ab6fb7076b3c" @@ -579,6 +598,7 @@ packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", + "log", "model", ] pruneopts = "" @@ -823,6 +843,8 @@ "cpu", "unix", "windows", + "windows/registry", + "windows/svc/eventlog", ] pruneopts = "" revision = "9109b7679e13aa34a54834cfb4949cac4b96e576" @@ -965,6 +987,14 @@ revision = "6eaf6f47437a6b4e2153a190160ef39a92c7eceb" version = "v1.23.0" +[[projects]] + digest = "1:15d017551627c8bb091bde628215b2861bed128855343fdd570c62d08871f6e1" + name = "gopkg.in/alecthomas/kingpin.v2" + packages = ["."] + pruneopts = "" + revision = "947dcec5ba9c011838740e680966fd7087a71d0d" + version = "v2.2.6" + [[projects]] digest = "1:75fb3fcfc73a8c723efde7777b40e8e8ff9babf30d8c56160d01beffea8a95a6" name = "gopkg.in/inf.v0" @@ -1483,6 +1513,7 @@ "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/prometheus/client_golang/prometheus/promhttp", + "github.com/prometheus/common/log", "github.com/sirupsen/logrus", "github.com/soheilhy/cmux", "github.com/spf13/cobra", diff --git a/Makefile b/Makefile index 86a91482ce55..4ce396950c97 100644 --- a/Makefile +++ b/Makefile @@ -99,6 +99,22 @@ else endif @if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)workflow-controller:$(IMAGE_TAG) ; fi +.PHONY: argo-server +argo-server: + CGO_ENABLED=0 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argo-server ./cmd/server + +.PHONY: argo-server-image +argo-server-image: +ifeq ($(DEV_IMAGE), true) + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -i -ldflags '${LDFLAGS}' -o 
argo-server ./cmd/server + docker build -t $(IMAGE_PREFIX)argo-server:$(IMAGE_TAG) -f Dockerfile.argo-server-dev . + rm -f argo-server +else + docker build -t $(IMAGE_PREFIX)argo-server:$(IMAGE_TAG) --target argo-server . +endif + @if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argo-server:$(IMAGE_TAG) ; fi + + .PHONY: executor executor: go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argoexec ./cmd/argoexec diff --git a/cmd/argo/commands/common.go b/cmd/argo/commands/common.go index 541721e213a5..b9714c5c3744 100644 --- a/cmd/argo/commands/common.go +++ b/cmd/argo/commands/common.go @@ -1,7 +1,6 @@ package commands import ( - "encoding/json" "fmt" "log" "os" @@ -72,11 +71,7 @@ func InitKubeClient() *kubernetes.Clientset { if err != nil { log.Fatal(err) } - b,err :=json.Marshal(restConfig) - fmt.Println(err) - - fmt.Println(string(b)) // create the clientset clientset, err = kubernetes.NewForConfig(restConfig) if err != nil { diff --git a/cmd/client/client.go b/cmd/client/client.go index 2e0294221145..a26075b73349 100644 --- a/cmd/client/client.go +++ b/cmd/client/client.go @@ -5,60 +5,13 @@ import ( "encoding/json" "fmt" "github.com/argoproj/argo/cmd/server/workflow" - wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo/util" + "github.com/prometheus/common/log" "google.golang.org/grpc" "google.golang.org/grpc/metadata" - "os" - "sigs.k8s.io/yaml" - ) -var wfStr = ` -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - generateName: hello-world- -spec: - entrypoint: whalesay - templates: - - name: whalesay - container: - image: docker/whalesay:latest - command: [cowsay] - args: ["hello world"] -` - - -func unmarshalWF(yamlStr string) *wfv1.Workflow { - var wf wfv1.Workflow - err := yaml.Unmarshal([]byte(yamlStr), &wf) - if err != nil { - panic(err) - } - return &wf -} - -func homeDir() string { - if h := os.Getenv("HOME"); h != "" { - return h - } - return os.Getenv("USERPROFILE") // windows -} - - - -//func generate(){ -// -// kubeConfigFlags := genericclioptions.NewConfigFlags(true) -// //kubeConfigFlags.AddFlags(flags) -// matchVersionKubeConfigFlags := cmdutil.NewMatchVersionFlags(kubeConfigFlags) -// //matchVersionKubeConfigFlags.AddFlags(cmds.PersistentFlags()) -// f := cmdutil.NewFactory(nil) -// f.RESTClient() -// -//} -func main(){ +func main() { //generate() conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure()) if err != nil { @@ -68,61 +21,30 @@ func main(){ client := workflow.NewWorkflowServiceClient(conn) //wf := unmarshalWF(wfStr) config := util.InitKubeClient() - // - ////tc, err :=config.TransportConfig() - // - var clientConfig workflow.ClientConfig - // - clientConfig.Host = config.Host - clientConfig.APIPath = config.APIPath - clientConfig.TLSClientConfig = config.TLSClientConfig - clientConfig.Username = config.Username - clientConfig.Password = config.Password - clientConfig.AuthProvider = config.AuthProvider - // - // - // - by,err := json.Marshal(clientConfig) - fmt.Println(err) - // - md := metadata.Pairs(workflow.CLIENT_REST_CONFIG, string(by)) + + clientConfig := workflow.ClientConfig{ + Host: config.Host, + APIPath: config.APIPath, + TLSClientConfig: config.TLSClientConfig, + Username: config.Username, + Password: config.Password, + AuthProvider: config.AuthProvider, + } + + marshalledClientConfig, err := json.Marshal(clientConfig) + if err != nil { + log.Fatal(err) + } + + md := metadata.Pairs(workflow.CLIENT_REST_CONFIG, string(marshalledClientConfig)) ctx := 
metadata.NewOutgoingContext(context.Background(), md) - //wq := workflow.WorkflowQuery{} - //created, err :=client.Get(ctx,&wq) - // - //fmt.Println("errr",err) - // - fmt.Println(string(by)) - wq := workflow.WorkflowListRequest { Namespace:"default"} + fmt.Println(string(marshalledClientConfig)) + + wq := workflow.WorkflowListRequest{Namespace: "default"} + queried, err := client.List(ctx, &wq) - if err !=nil { - fmt.Println("errr",err) + if err != nil { + log.Fatal(err) } fmt.Println(queried) - //var wuq workflow.WorkflowUpdateQuery - ////wuq.Workflow = queried - ////wur, err := client.Retry(context.TODO(), &wuq) - //// - ////if err !=nil { - //// fmt.Println("errr",err) - ////} - ////fmt.Println(wur) - //// - ////name := "scripts-bash-5ksp4" - ////query := workflow.WorkflowQuery{Name: name,} - //// - //// - //created, err :=client.Create(ctx,wf) - //if err !=nil { - // fmt.Println("errr",err) - //} - //fmt.Println(created) - // - ////byte1, err := wflist.Workflows.Marshal() - ////for inx,_ := range wflist.Workflows { - //// fmt.Println("Response:", wflist.Workflows[inx].Name) - //// - ////} - - } diff --git a/cmd/server/apiserver/argoserver.go b/cmd/server/apiserver/argoserver.go index e6dd57def61d..01de1fe21311 100644 --- a/cmd/server/apiserver/argoserver.go +++ b/cmd/server/apiserver/argoserver.go @@ -18,7 +18,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "net" - "regexp" "sigs.k8s.io/yaml" "fmt" @@ -47,10 +46,14 @@ type ArgoServerOpts struct { ConfigName string } -func NewArgoServer(ctx context.Context, opts ArgoServerOpts) *ArgoServer { - - return &ArgoServer{Namespace: opts.Namespace, WfClientSet: opts.WfClientSet, KubeClientset: opts.KubeClientset, - EnableClientAuth: opts.EnableClientAuth, ConfigName: opts.ConfigName} +func NewArgoServer(opts ArgoServerOpts) *ArgoServer { + return &ArgoServer{ + Namespace: opts.Namespace, + WfClientSet: opts.WfClientSet, + KubeClientset: opts.KubeClientset, + EnableClientAuth: opts.EnableClientAuth, + ConfigName: opts.ConfigName, + } } var backoff = wait.Backoff{ @@ -61,32 +64,35 @@ var backoff = wait.Backoff{ } func (as *ArgoServer) useTLS() bool { - return false } func (as *ArgoServer) Run(ctx context.Context, port int) { - grpcs := as.newGRPCServer() - var httpS *http.Server - var httpsS *http.Server + grpcServer := as.newGRPCServer() + var httpServer *http.Server + var httpsServer *http.Server if as.useTLS() { - httpS = newRedirectServer(port) - httpsS = as.newHTTPServer(ctx, port) + httpServer = newRedirectServer(port) + httpsServer = as.newHTTPServer(ctx, port) } else { - httpS = as.newHTTPServer(ctx, port) + httpServer = as.newHTTPServer(ctx, port) } // Start listener var conn net.Listener - var realErr error - _ = wait.ExponentialBackoff(backoff, func() (bool, error) { - conn, realErr = net.Listen("tcp", fmt.Sprintf("localhost:%d", port)) - if realErr != nil { - log.Warnf("failed listen: %v", realErr) + var listerErr error + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + conn, listerErr = net.Listen("tcp", fmt.Sprintf("localhost:%d", port)) + if listerErr != nil { + log.Warnf("failed to listen: %v", listerErr) return false, nil } return true, nil }) + if err != nil { + log.Error(err) + return + } // Cmux is used to support servicing gRPC and HTTP1.1+JSON on the same port tcpm := cmux.New(conn) @@ -113,7 +119,7 @@ func (as *ArgoServer) Run(ctx context.Context, port int) { tlsl = tls.NewListener(tlsl, &tlsConfig) // Now, we build another mux recursively to match HTTPS and 
gRPC. - tlsm = cmux.New(tlsl) + tlsm := cmux.New(tlsl) httpsL = tlsm.Match(cmux.HTTP1Fast()) grpcL = tlsm.Match(cmux.Any()) } @@ -125,22 +131,22 @@ func (as *ArgoServer) Run(ctx context.Context, port int) { //if err != nil { // log.Fatalf("failed to listen: %v", err) //} - go func() { as.checkServeErr("grpcS", grpcs.Serve(grpcL)) }() - go func() { as.checkServeErr("httpS", httpS.Serve(httpL)) }() + + go func() { as.checkServeErr("grpcServer", grpcServer.Serve(grpcL)) }() + go func() { as.checkServeErr("httpServer", httpServer.Serve(httpL)) }() + go func() { as.checkServeErr("tcpm", tcpm.Serve()) }() if as.useTLS() { - go func() { as.checkServeErr("httpsS", httpsS.Serve(httpsL)) }() + go func() { as.checkServeErr("httpsServer", httpsServer.Serve(httpsL)) }() go func() { as.checkServeErr("tlsm", tlsm.Serve()) }() } - go func() { as.checkServeErr("tcpm", tcpm.Serve()) }() as.stopCh = make(chan struct{}) <-as.stopCh - } func (as *ArgoServer) newGRPCServer() *grpc.Server { sOpts := []grpc.ServerOption{ - // Set the both send and receive the bytes limit to be 100MB + // Set both the send and receive the bytes limit to be 100MB // The proper way to achieve high performance is to have pagination // while we work toward that, we can have high limit first grpc.MaxRecvMsgSize(apiclient.MaxGRPCMessageSize), @@ -148,15 +154,16 @@ func (as *ArgoServer) newGRPCServer() *grpc.Server { grpc.ConnectionTimeout(300 * time.Second), } - grpcS := grpc.NewServer(sOpts...) + grpcServer := grpc.NewServer(sOpts...) configMap, err := as.RsyncConfig(as.Namespace, as.WfClientSet, as.KubeClientset) if err != nil { - //panic("Error marshalling config map") + // TODO: this currently returns an error every time + log.Errorf("Error marshalling config map: %s", err) } workflowServer := workflow.NewWorkflowServer(as.Namespace, as.WfClientSet, as.KubeClientset, configMap, as.EnableClientAuth) - workflow.RegisterWorkflowServiceServer(grpcS, workflowServer) + workflow.RegisterWorkflowServiceServer(grpcServer, workflowServer) - return grpcS + return grpcServer } // newHTTPServer returns the HTTP server to serve HTTP/HTTPS requests. This is implemented @@ -165,29 +172,29 @@ func (a *ArgoServer) newHTTPServer(ctx context.Context, port int) *http.Server { endpoint := fmt.Sprintf("localhost:%d", port) mux := http.NewServeMux() - httpS := http.Server{ + httpServer := http.Server{ Addr: endpoint, Handler: mux, } - var dOpts []grpc.DialOption - dOpts = append(dOpts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(apiclient.MaxGRPCMessageSize))) - //dOpts = append(dOpts, grpc.WithUserAgent(fmt.Sprintf("%s/%s", common.ArgoCDUserAgentName, argocd.GetVersion().Version))) + var dialOpts []grpc.DialOption + dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(apiclient.MaxGRPCMessageSize))) + //dialOpts = append(dialOpts, grpc.WithUserAgent(fmt.Sprintf("%s/%s", common.ArgoCDUserAgentName, argocd.GetVersion().Version))) - dOpts = append(dOpts, grpc.WithInsecure()) + dialOpts = append(dialOpts, grpc.WithInsecure()) // HTTP 1.1+JSON Server // grpc-ecosystem/grpc-gateway is used to proxy HTTP requests to the corresponding gRPC call // NOTE: if a marshaller option is not supplied, grpc-gateway will default to the jsonpb from // golang/protobuf. Which does not support types such as time.Time. gogo/protobuf does support // time.Time, but does not support custom UnmarshalJSON() and MarshalJSON() methods. 
Therefore - //// we use our own Marshaler + // we use our own Marshaler gwMuxOpts := runtime.WithMarshalerOption(runtime.MIMEWildcard, new(json.JSONMarshaler)) gwCookieOpts := runtime.WithForwardResponseOption(a.translateGrpcCookieHeader) gwmux := runtime.NewServeMux(gwMuxOpts, gwCookieOpts) - mustRegisterGWHandler(workflow.RegisterWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dOpts) + mustRegisterGWHandler(workflow.RegisterWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) mux.Handle("/api/", gwmux) - return &httpS + return &httpServer } type registerFunc func(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) error @@ -200,41 +207,41 @@ func mustRegisterGWHandler(register registerFunc, ctx context.Context, mux *runt } } -type handlerSwitcher struct { - handler http.Handler - contentTypeToHandler map[string]http.Handler -} - -func (s *handlerSwitcher) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if contentHandler, ok := s.contentTypeToHandler[r.Header.Get("content-type")]; ok { - contentHandler.ServeHTTP(w, r) - } else { - s.handler.ServeHTTP(w, r) - } -} +//type handlerSwitcher struct { +// handler http.Handler +// contentTypeToHandler map[string]http.Handler +//} +// +//func (s *handlerSwitcher) ServeHTTP(w http.ResponseWriter, r *http.Request) { +// if contentHandler, ok := s.contentTypeToHandler[r.Header.Get("content-type")]; ok { +// contentHandler.ServeHTTP(w, r) +// } else { +// s.handler.ServeHTTP(w, r) +// } +//} // Workaround for https://github.com/golang/go/issues/21955 to support escaped URLs in URL path. -type bug21955Workaround struct { - handler http.Handler -} - -var pathPatters = []*regexp.Regexp{ - regexp.MustCompile(`/api/v1/workflows/[^/]+`), -} - -func (bf *bug21955Workaround) ServeHTTP(w http.ResponseWriter, r *http.Request) { - for _, pattern := range pathPatters { - if pattern.MatchString(r.URL.RawPath) { - r.URL.Path = r.URL.RawPath - break - } - } - bf.handler.ServeHTTP(w, r) -} - -func bug21955WorkaroundInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - return handler(ctx, req) -} +//type bug21955Workaround struct { +// handler http.Handler +//} +// +//var pathPatters = []*regexp.Regexp{ +// regexp.MustCompile(`/api/v1/workflows/[^/]+`), +//} +// +//func (bf *bug21955Workaround) ServeHTTP(w http.ResponseWriter, r *http.Request) { +// for _, pattern := range pathPatters { +// if pattern.MatchString(r.URL.RawPath) { +// r.URL.Path = r.URL.RawPath +// break +// } +// } +// bf.handler.ServeHTTP(w, r) +//} + +//func bug21955WorkaroundInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { +// return handler(ctx, req) +//} // newRedirectServer returns an HTTP server which does a 307 redirect to the HTTPS server func newRedirectServer(port int) *http.Server { diff --git a/cmd/server/main.go b/cmd/server/main.go index b665f4d1a60b..3b371437b8b6 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -29,23 +29,22 @@ const ( // NewRootCommand returns an new instance of the workflow-controller main entrypoint func NewRootCommand() *cobra.Command { var ( - clientConfig clientcmd.ClientConfig - logLevel string // --loglevel - enableClientAuth string - configMap string - port int + clientConfig clientcmd.ClientConfig + logLevel string // --loglevel + enableClientAuth string + configMap string + port int ) var command = cobra.Command{ Use: 
CLIName, - Short: "Argo api server", + Short: "argo-api-server is Argo's API server", RunE: func(c *cobra.Command, args []string) error { cli.SetLogLevel(logLevel) stats.RegisterStackDumper() stats.StartStatsTicker(5 * time.Minute) config, err := clientConfig.ClientConfig() - if err != nil { return err } @@ -58,19 +57,25 @@ func NewRootCommand() *cobra.Command { } kubeConfig := kubernetes.NewForConfigOrDie(config) - wflientset := wfclientset.NewForConfigOrDie(config) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + clientAuth, err := strconv.ParseBool(enableClientAuth) if err != nil { return err } - ctx, cancel := context.WithCancel(context.Background()) - var clientAuth bool - clientAuth, err =strconv.ParseBool( enableClientAuth) - var opts = apiserver.ArgoServerOpts{Namespace: namespace, WfClientSet: wflientset,KubeClientset: kubeConfig, EnableClientAuth: clientAuth} - argoSvr := apiserver.NewArgoServer(ctx, opts ) - defer cancel() - go argoSvr.Run(ctx,port) + + opts := apiserver.ArgoServerOpts{ + Namespace: namespace, + WfClientSet: wflientset, + KubeClientset: kubeConfig, + EnableClientAuth: clientAuth, + } + apiServer := apiserver.NewArgoServer(opts) + + go apiServer.Run(ctx, port) // Wait forever select {} @@ -92,4 +97,4 @@ func main() { fmt.Println(err) os.Exit(1) } -} \ No newline at end of file +} diff --git a/cmd/server/workflow/common.go b/cmd/server/workflow/common.go index bafb56a0b47f..7a45379665e5 100644 --- a/cmd/server/workflow/common.go +++ b/cmd/server/workflow/common.go @@ -7,10 +7,9 @@ import ( "time" ) -const CLIENT_REST_CONFIG = "rest.config" +const CLIENT_REST_CONFIG = "rest.config" const AUTH_TOKEN = "auth.token" - type ClientConfig struct { // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. @@ -63,6 +62,4 @@ type ClientConfig struct { // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. 
Timeout time.Duration - - -} \ No newline at end of file +} diff --git a/cmd/server/workflow/workflow_db_service.go b/cmd/server/workflow/workflow_db_service.go index e99dccea33c3..87874d854770 100644 --- a/cmd/server/workflow/workflow_db_service.go +++ b/cmd/server/workflow/workflow_db_service.go @@ -58,32 +58,34 @@ func (db *DBService) List(namespace string, pageSize uint, lastId string) (*v1al if db.wfDBctx == nil { return nil, errors.New(errors.CodeInternal, "DB Context is not initialized") } - var wfList *v1alpha1.WorkflowList - - var err error var cond dblib.Cond if namespace != "" { cond = dblib.Cond{"namespace": namespace} } - if pageSize == 0 { - wfList.Items, err = db.wfDBctx.Query(cond) - } else { - wfList, err = db.wfDBctx.QueryWithPagination(cond, pageSize, lastId) + if pageSize == 0 { + items, err := db.wfDBctx.Query(cond) + if err != nil { + return nil, err + } + return &v1alpha1.WorkflowList{ + Items: items, + }, nil } + + wfList, err := db.wfDBctx.QueryWithPagination(cond, pageSize, lastId) if err != nil { return nil, err } - - return wfList, err + return wfList, nil } -func (db *DBService) Delete(wfName string, namespace string) (error) { +func (db *DBService) Delete(wfName string, namespace string) error { if db.wfDBctx == nil { return errors.New(errors.CodeInternal, "DB Context is not initialized") } cond := dblib.Cond{"name": wfName, "namespace": namespace} return db.wfDBctx.Delete(cond) -} \ No newline at end of file +} diff --git a/cmd/server/workflow/workflow_server.go b/cmd/server/workflow/workflow_server.go index a2b9907cb526..c86716ef06fa 100644 --- a/cmd/server/workflow/workflow_server.go +++ b/cmd/server/workflow/workflow_server.go @@ -37,7 +37,6 @@ type WorkflowServer struct { } func NewWorkflowServer(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset, config *config.WorkflowControllerConfig, enableClientAuth bool) *WorkflowServer { - wfServer := WorkflowServer{Namespace: namespace, WfClientset: wfClientset, KubeClientset: kubeClientSet, EnableClientAuth: enableClientAuth} var err error if config != nil && config.Persistence != nil { @@ -51,17 +50,14 @@ func NewWorkflowServer(namespace string, wfClientset *versioned.Clientset, kubeC } func (s *WorkflowServer) CreatePersistenceContext(namespace string, kubeClientSet *kubernetes.Clientset, config *config.PersistConfig) (*sqldb.WorkflowDBContext, error) { - var wfDBCtx sqldb.WorkflowDBContext var err error - //wfDBCtx.TableName = wfc.Config.Persistence.TableName wfDBCtx.NodeStatusOffload = config.NodeStatusOffload - wfDBCtx.Session, wfDBCtx.TableName, err = sqldb.CreateDBSession(kubeClientSet, namespace, config) if err != nil { - log.Errorf("Error in createPersistenceContext. 
%v", err) + log.Errorf("Error in createPersistenceContext: %s", err) return nil, err } @@ -69,7 +65,6 @@ func (s *WorkflowServer) CreatePersistenceContext(namespace string, kubeClientSe } func (s *WorkflowServer) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubernetes.Clientset, error) { - md, _ := metadata.FromIncomingContext(ctx) if !s.EnableClientAuth { @@ -92,39 +87,40 @@ func (s *WorkflowServer) GetWFClient(ctx context.Context) (*versioned.Clientset, if err != nil { return nil, nil, err } - restConfig.BearerToken = string(bearerToken) - // create the clientset + restConfig.BearerToken = bearerToken + wfClientset, err := wfclientset.NewForConfig(&restConfig) + if err != nil { + log.Errorf("Failure to create WfClientset with ClientConfig '%+v': %s", restConfig, err) + return nil, nil, err + } - // create the clientset clientset, err := kubernetes.NewForConfig(&restConfig) - if err != nil { - log.Warnf("Failure to create WfClientset. ClientConfig: %s, Error: %s", restConfig, err) + log.Errorf("Failure to create KubeClientset with ClientConfig '%+v': %s", restConfig, err) return nil, nil, err } return wfClientset, clientset, nil } -func (s *WorkflowServer) Create(ctx context.Context, in *WorkflowCreateRequest) (*v1alpha1.Workflow, error) { - +func (s *WorkflowServer) Create(ctx context.Context, wfReq *WorkflowCreateRequest) (*v1alpha1.Workflow, error) { wfClient, _, err := s.GetWFClient(ctx) if err != nil { return nil, err } namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace + if wfReq.Namespace != "" { + namespace = wfReq.Namespace } - if in.Workflow == nil { - return nil, errors.New("Workflow body not found") + if wfReq.Workflow == nil { + return nil, fmt.Errorf("workflow body not specified") } - in.Workflow.Namespace = namespace + wfReq.Workflow.Namespace = namespace - wf, err := s.ApplyWorkflowOptions(in.Workflow, in.SubmitOptions) + wf, err := s.ApplyWorkflowOptions(wfReq.Workflow, wfReq.SubmitOptions) if err != nil { return nil, err } @@ -134,37 +130,37 @@ func (s *WorkflowServer) Create(ctx context.Context, in *WorkflowCreateRequest) return nil, err } - if in.SubmitOptions != nil && in.SubmitOptions.ServerDryRun { + if wfReq.SubmitOptions != nil && wfReq.SubmitOptions.ServerDryRun { return util.CreateServerDryRun(wf, wfClient) } - wf, err = s.WfKubeService.Create(wfClient, namespace, in.Workflow) + wf, err = s.WfKubeService.Create(wfClient, namespace, wfReq.Workflow) if err != nil { - log.Warnf("Create request is failed. Error: %s", err) + log.Errorf("Create request is failed. Error: %s", err) return nil, err } - log.Info("Workflow created successfully. 
Name: %s", wf.Name) + log.Infof("Workflow '%s' created successfully", wf.Name) return wf, nil } -func (s *WorkflowServer) Get(ctx context.Context, in *WorkflowGetRequest) (*v1alpha1.Workflow, error) { - - namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace - } +func (s *WorkflowServer) Get(ctx context.Context, wfReq *WorkflowGetRequest) (*v1alpha1.Workflow, error) { wfClient, _, err := s.GetWFClient(ctx) - if err != nil { return nil, err } + var wf *v1alpha1.Workflow + namespace := s.Namespace + if wfReq.Namespace != "" { + namespace = wfReq.Namespace + } if s.WfDBService != nil { - wf, err = s.WfDBService.Get(in.WorkflowName, in.Namespace) + wf, err = s.WfDBService.Get(wfReq.WorkflowName, wfReq.Namespace) } else { - wf, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + + wf, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) } if err != nil { return nil, err @@ -173,24 +169,21 @@ func (s *WorkflowServer) Get(ctx context.Context, in *WorkflowGetRequest) (*v1al return wf, err } -func (s *WorkflowServer) List(ctx context.Context, in *WorkflowListRequest) (*v1alpha1.WorkflowList, error) { - - namespace := s.Namespace - - if in.Namespace != "" { - namespace = in.Namespace - } +func (s *WorkflowServer) List(ctx context.Context, wfReq *WorkflowListRequest) (*v1alpha1.WorkflowList, error) { wfClient, _, err := s.GetWFClient(ctx) - if err != nil { return nil, err } - listOpt := in.ListOptions + var wfList *v1alpha1.WorkflowList + namespace := s.Namespace + if wfReq.Namespace != "" { + namespace = wfReq.Namespace + } + if s.WfDBService != nil { - wfList, err = s.WfDBService.List(namespace, uint(listOpt.Limit), "") + wfList, err = s.WfDBService.List(namespace, uint(wfReq.ListOptions.Limit), "") } else { - wfList, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).List(v1.ListOptions{}) } if err != nil { @@ -198,42 +191,41 @@ func (s *WorkflowServer) List(ctx context.Context, in *WorkflowListRequest) (*v1 } return wfList, nil - } -func (s *WorkflowServer) Delete(ctx context.Context, in *WorkflowDeleteRequest) (*WorkflowDeleteResponse, error) { - namespace := s.Namespace - - if in.Namespace != "" { - namespace = in.Namespace - } +func (s *WorkflowServer) Delete(ctx context.Context, wfReq *WorkflowDeleteRequest) (*WorkflowDeleteResponse, error) { wfClient, _, err := s.GetWFClient(ctx) - if err != nil { return nil, err } + + namespace := s.Namespace + if wfReq.Namespace != "" { + namespace = wfReq.Namespace + } + if s.WfDBService != nil { - err = s.WfDBService.Delete(in.WorkflowName, in.Namespace) + err = s.WfDBService.Delete(wfReq.WorkflowName, wfReq.Namespace) if err != nil { return nil, err } } - err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Delete(in.WorkflowName, &v1.DeleteOptions{}) + err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Delete(wfReq.WorkflowName, &v1.DeleteOptions{}) if err != nil { return nil, err } - var rsp WorkflowDeleteResponse - rsp.WorkflowName = in.WorkflowName - rsp.Status = "Deleted" - return &rsp, nil + return &WorkflowDeleteResponse{ + WorkflowName: wfReq.WorkflowName, + Status: "Deleted", + }, nil } -func (s *WorkflowServer) Retry(ctx context.Context, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { +func (s *WorkflowServer) Retry(ctx context.Context, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace + if wfReq.Namespace != "" { + namespace = 
wfReq.Namespace } wfClient, kubeClient, err := s.GetWFClient(ctx) @@ -241,7 +233,7 @@ func (s *WorkflowServer) Retry(ctx context.Context, in *WorkflowUpdateRequest) ( if err != nil { return nil, err } - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err @@ -256,129 +248,145 @@ func (s *WorkflowServer) Retry(ctx context.Context, in *WorkflowUpdateRequest) ( } func (s *WorkflowServer) Resubmit(ctx context.Context, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + wfClient, _, err := s.GetWFClient(ctx) + if err != nil { + return nil, err + } + namespace := s.Namespace if in.Namespace != "" { namespace = in.Namespace } - wfClient, _, err := s.GetWFClient(ctx) - + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } - - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) - newWF, err := util.FormulateResubmitWorkflow(wf, in.Memoized) - + if err != nil { + return nil, err + } created, err := util.SubmitWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfClient, namespace, newWF, nil) - if err != nil { - fmt.Println(err) return nil, err } return created, err } -func (s *WorkflowServer) Resume(ctx context.Context, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { +func (s *WorkflowServer) Resume(ctx context.Context, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + wfClient, _, err := s.GetWFClient(ctx) + if err != nil { + return nil, err + } + namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace + if wfReq.Namespace != "" { + namespace = wfReq.Namespace } - wfClient, _, err := s.GetWFClient(ctx) + err = util.ResumeWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfReq.WorkflowName) if err != nil { + log.Warnf("Failed to resume '%s': %s", wfReq.WorkflowName, err) return nil, err } - err = util.ResumeWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.WorkflowName) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { - log.Warnf("Failed to resume %s: %+v", in.WorkflowName, err) return nil, err } - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + return wf, nil +} +func (s *WorkflowServer) Suspend(ctx context.Context, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + wfClient, _, err := s.GetWFClient(ctx) if err != nil { return nil, err } - return wf, nil -} -func (s *WorkflowServer) Suspend(ctx context.Context, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace + if wfReq.Namespace != "" { + namespace = wfReq.Namespace } - wfClient, _, err := s.GetWFClient(ctx) - + err = util.SuspendWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfReq.WorkflowName) if err != nil { return nil, err } - err = util.SuspendWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.WorkflowName) - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } + return wf, nil } -func (s *WorkflowServer) Terminate(ctx context.Context, in 
*WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { +func (s *WorkflowServer) Terminate(ctx context.Context, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + wfClient, _, err := s.GetWFClient(ctx) + if err != nil { + return nil, err + } + namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace + if wfReq.Namespace != "" { + namespace = wfReq.Namespace } - wfClient, _, err := s.GetWFClient(ctx) + err = util.TerminateWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfReq.WorkflowName) if err != nil { return nil, err } - err = util.TerminateWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.WorkflowName) - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } + return wf, nil } -func (s *WorkflowServer) Lint(ctx context.Context, in *WorkflowCreateRequest) (*v1alpha1.Workflow, error) { +func (s *WorkflowServer) Lint(ctx context.Context, wfReq *WorkflowCreateRequest) (*v1alpha1.Workflow, error) { wfClient, _, err := s.GetWFClient(ctx) + if err != nil { + return nil, err + } + namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace + if wfReq.Namespace != "" { + namespace = wfReq.Namespace } - err = validate.ValidateWorkflow(wfClient, namespace, in.Workflow, validate.ValidateOpts{}) + err = validate.ValidateWorkflow(wfClient, namespace, wfReq.Workflow, validate.ValidateOpts{}) if err != nil { return nil, err } - return in.Workflow, nil + + return wfReq.Workflow, nil } -func (s *WorkflowServer) Watch(in *WorkflowGetRequest, ws WorkflowService_WatchServer) error { - namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace - } +func (s *WorkflowServer) Watch(wfReq *WorkflowGetRequest, ws WorkflowService_WatchServer) error { wfClient, _, err := s.GetWFClient(ws.Context()) - if err != nil { return err } + + namespace := s.Namespace + if wfReq.Namespace != "" { + namespace = wfReq.Namespace + } + wfs, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Watch(v1.ListOptions{}) if err != nil { return err } + done := make(chan bool) go func() { for next := range wfs.ResultChan() { a := *next.Object.(*v1alpha1.Workflow) - if in.WorkflowName == "" || in.WorkflowName == a.Name { - + if wfReq.WorkflowName == "" || wfReq.WorkflowName == a.Name { err = ws.Send(&a) if err != nil { log.Warnf("Unable to send stream message: %v", err) @@ -387,34 +395,39 @@ func (s *WorkflowServer) Watch(in *WorkflowGetRequest, ws WorkflowService_WatchS } done <- true }() + select { case <-ws.Context().Done(): wfs.Stop() case <-done: wfs.Stop() } + return nil } -func (s *WorkflowServer) PodLogs(in *WorkflowLogRequest, log WorkflowService_PodLogsServer) error { +func (s *WorkflowServer) PodLogs(wfReq *WorkflowLogRequest, log WorkflowService_PodLogsServer) error { + _, kubeClient, err := s.GetWFClient(log.Context()) + if err != nil { + return err + } namespace := s.Namespace - if in.Namespace != "" { - namespace = in.Namespace + if wfReq.Namespace != "" { + namespace = wfReq.Namespace } containerName := "main" - if in.Container != "" { - containerName = in.Container + if wfReq.Container != "" { + containerName = wfReq.Container } - _, kubeClient, err := s.GetWFClient(log.Context()) - stream, err := kubeClient.CoreV1().Pods(namespace).GetLogs(in.PodName, &corev1.PodLogOptions{ + stream, err := kubeClient.CoreV1().Pods(namespace).GetLogs(wfReq.PodName, 
&corev1.PodLogOptions{ Container: containerName, - Follow: in.LogOptions.Follow, + Follow: wfReq.LogOptions.Follow, Timestamps: true, - SinceSeconds: in.LogOptions.SinceSeconds, - SinceTime: in.LogOptions.SinceTime, - TailLines: in.LogOptions.TailLines, + SinceSeconds: wfReq.LogOptions.SinceSeconds, + SinceTime: wfReq.LogOptions.SinceTime, + TailLines: wfReq.LogOptions.TailLines, }).Stream() if err == nil { @@ -431,12 +444,12 @@ func (s *WorkflowServer) PodLogs(in *WorkflowLogRequest, log WorkflowService_Pod for _, line := range strings.Split(lines, "\r") { if line != "" { cnt := LogEntry{Content: line, TimeStamp: &logTime} - log.Send(&cnt) + _ = log.Send(&cnt) } } } else { cnt := LogEntry{Content: line, TimeStamp: &logTime} - log.Send(&cnt) + _ = log.Send(&cnt) } } } diff --git a/cmd/server/workflow/workflow_service.go b/cmd/server/workflow/workflow_service.go index 6221b529de4a..2d7e20502791 100644 --- a/cmd/server/workflow/workflow_service.go +++ b/cmd/server/workflow/workflow_service.go @@ -3,7 +3,6 @@ package workflow import ( "encoding/json" "errors" - "fmt" "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo/pkg/client/clientset/versioned" wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" @@ -24,11 +23,15 @@ type KubeService struct { } func NewKubeServer(Namespace string, wfClientset *wfclientset.Clientset, kubeClientSet *kubernetes.Clientset, enableClientAuth bool) *KubeService { - return &KubeService{Namespace: Namespace, WfClientset: wfClientset, KubeClientset: kubeClientSet, EnableClientAuth: enableClientAuth} + return &KubeService{ + Namespace: Namespace, + WfClientset: wfClientset, + KubeClientset: kubeClientSet, + EnableClientAuth: enableClientAuth, + } } func (s *KubeService) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubernetes.Clientset, error) { - md, _ := metadata.FromIncomingContext(ctx) if s.EnableClientAuth { @@ -51,132 +54,130 @@ func (s *KubeService) GetWFClient(ctx context.Context) (*versioned.Clientset, *k if err != nil { return nil, nil, err } - restConfig.BearerToken = string(bearerToken) - // create the clientset + restConfig.BearerToken = bearerToken + wfClientset, err := wfclientset.NewForConfig(&restConfig) + if err != nil { + log.Errorf("Failure to create WfClientset with ClientConfig '%+v': %s", restConfig, err) + return nil, nil, err + } - // create the clientset clientset, err := kubernetes.NewForConfig(&restConfig) - if err != nil { - log.Warnf("Failure to create WfClientset. ClientConfig: %s, Error: %s", restConfig, err) + log.Errorf("Failure to create KubeClientset with ClientConfig '%+v': %s", restConfig, err) return nil, nil, err } return wfClientset, clientset, nil } -func (s *KubeService) Create(wfClient *versioned.Clientset, namespace string, in *v1alpha1.Workflow) (*v1alpha1.Workflow, error) { - - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Create(in) +func (s *KubeService) Create(wfClient *versioned.Clientset, namespace string, wf *v1alpha1.Workflow) (*v1alpha1.Workflow, error) { + createdWf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Create(wf) if err != nil { log.Warnf("Create request is failed. Error: %s", err) return nil, err } - log.Info("Workflow created successfully. 
Name: %s", wf.Name) - return wf, nil -} - -func (s *KubeService) Get(wfClient *versioned.Clientset, namespace string, in *WorkflowGetRequest) (*v1alpha1.Workflow, error) { - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + log.Infof("Workflow created successfully. Name: %s", createdWf.Name) + return createdWf, nil +} +func (s *KubeService) Get(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowGetRequest) (*v1alpha1.Workflow, error) { + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } - return wf, err } -func (s *KubeService) List(wfClient *versioned.Clientset, namespace string, in *WorkflowListRequest) (*v1alpha1.WorkflowList, error) { - +func (s *KubeService) List(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowListRequest) (*v1alpha1.WorkflowList, error) { wfList, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).List(v1.ListOptions{}) if err != nil { - fmt.Println(err) + return nil, err } - return wfList, nil - } -func (s *KubeService) Delete(wfClient *versioned.Clientset, namespace string, in *WorkflowDeleteRequest) (*WorkflowDeleteResponse, error) { - - err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Delete(in.WorkflowName, &v1.DeleteOptions{}) +func (s *KubeService) Delete(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowDeleteRequest) (*WorkflowDeleteResponse, error) { + err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Delete(wfReq.WorkflowName, &v1.DeleteOptions{}) if err != nil { - log.Fatal(err) return nil, err } - //fmt.Sprint("Workflow '%s' deleted\n", in.WorkflowName) - return nil , nil + return nil, nil } -func (s *KubeService) Retry(wfClient *versioned.Clientset, kubeClient *kubernetes.Clientset, namespace string, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) - +func (s *KubeService) Retry(wfClient *versioned.Clientset, kubeClient *kubernetes.Clientset, namespace string, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } wf, err = util.RetryWorkflow(kubeClient, wfClient.ArgoprojV1alpha1().Workflows(namespace), wf) - if err != nil { return nil, err } return wf, err } -func (s *KubeService) Resubmit(wfClient *versioned.Clientset, namespace string, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) +func (s *KubeService) Resubmit(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) + if err != nil { + return nil, err + } - newWF, err := util.FormulateResubmitWorkflow(wf, in.Memoized) + newWF, err := util.FormulateResubmitWorkflow(wf, wfReq.Memoized) + if err != nil { + return nil, err + } created, err := util.SubmitWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfClient, namespace, newWF, nil) - if err != nil { - fmt.Println(err) return nil, err } return created, err } -func (s *KubeService) Resume(wfClient *versioned.Clientset, namespace string, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - - err := 
util.ResumeWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.WorkflowName) +func (s *KubeService) Resume(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + err := util.ResumeWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfReq.WorkflowName) if err != nil { - log.Warnf("Failed to resume %s: %+v", in.WorkflowName, err) + log.Warnf("Failed to resume %s: %+v", wfReq.WorkflowName, err) return nil, err } - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) - + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } + return wf, nil } -func (s *KubeService) Suspend(wfClient *versioned.Clientset, namespace string, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - - err := util.SuspendWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.WorkflowName) +func (s *KubeService) Suspend(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + err := util.SuspendWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfReq.WorkflowName) + if err != nil { + return nil, err + } - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } + return wf, nil } -func (s *KubeService) Terminate(wfClient *versioned.Clientset, namespace string, in *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - - err := util.TerminateWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), in.WorkflowName) +func (s *KubeService) Terminate(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { + err := util.TerminateWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfReq.WorkflowName) + if err != nil { + return nil, err + } - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } + return wf, nil } diff --git a/cmd/server/workflow/workflow_service_test.go b/cmd/server/workflow/workflow_service_test.go index e46ee67a942c..7b7b5ac2c62b 100644 --- a/cmd/server/workflow/workflow_service_test.go +++ b/cmd/server/workflow/workflow_service_test.go @@ -8,7 +8,6 @@ import ( "testing" ) - var wf = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -159,11 +158,11 @@ func unmarshalWF(yamlStr string) *wfv1.Workflow { return &wf } -func TestMarshalling(t *testing.T){ +func TestMarshalling(t *testing.T) { workf := unmarshalWF(wf) - wr :=WorkflowResponse{ Workflows:workf} + wr := WorkflowResponse{Workflows: workf} bytes, err := wr.Marshal() if err != nil { @@ -171,6 +170,6 @@ func TestMarshalling(t *testing.T){ wr1 := WorkflowResponse{} wr1.Unmarshal(bytes) fmt.Println(wr1) - assert.Equal(t, wr,wr1) + assert.Equal(t, wr, wr1) } diff --git a/persist/sqldb/workflow_repository.go b/persist/sqldb/workflow_repository.go index 4c9f11ccf000..6d506915be4d 100644 --- a/persist/sqldb/workflow_repository.go +++ b/persist/sqldb/workflow_repository.go @@ -15,25 +15,22 @@ import ( wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" ) -type ( - WorkflowDBContext struct { - TableName string - NodeStatusOffload bool - Session sqlbuilder.Database - } - - 
DBRepository interface { - Save(wf *wfv1.Workflow) error - Get(uid string) (*wfv1.Workflow, error) - List(orderBy interface{}) (*wfv1.WorkflowList, error) - Query(condition db.Cond, orderBy ...interface{}) ([]wfv1.Workflow, error) - Delete(condition db.Cond)(error) - Close() error - IsNodeStatusOffload() bool - QueryWithPagination(condition db.Cond, pageSize uint, lastID string, orderBy ...interface{})(*wfv1.WorkflowList, error) +type WorkflowDBContext struct { + TableName string + NodeStatusOffload bool + Session sqlbuilder.Database +} - } -) +type DBRepository interface { + Save(wf *wfv1.Workflow) error + Get(uid string) (*wfv1.Workflow, error) + List(orderBy interface{}) (*wfv1.WorkflowList, error) + Query(condition db.Cond, orderBy ...interface{}) ([]wfv1.Workflow, error) + Delete(condition db.Cond) error + Close() error + IsNodeStatusOffload() bool + QueryWithPagination(condition db.Cond, pageSize uint, lastID string, orderBy ...interface{}) (*wfv1.WorkflowList, error) +} type WorkflowDB struct { Id string `db:"id"` @@ -45,10 +42,20 @@ type WorkflowDB struct { FinishedAt time.Time `db:"finishedat"` } -func convert(wf *wfv1.Workflow) *WorkflowDB { - jsonWf, _ := json.Marshal(wf) - startT, _ := time.Parse(time.RFC3339, wf.Status.StartedAt.Format(time.RFC3339)) - endT, _ := time.Parse(time.RFC3339, wf.Status.FinishedAt.Format(time.RFC3339)) +func convert(wf *wfv1.Workflow) (*WorkflowDB, error) { + jsonWf, err := json.Marshal(wf) + if err != nil { + return nil, err + } + startT, err := time.Parse(time.RFC3339, wf.Status.StartedAt.Format(time.RFC3339)) + if err != nil { + return nil, err + } + endT, err := time.Parse(time.RFC3339, wf.Status.FinishedAt.Format(time.RFC3339)) + if err != nil { + return nil, err + } + return &WorkflowDB{ Id: string(wf.UID), Name: wf.Name, @@ -57,8 +64,7 @@ func convert(wf *wfv1.Workflow) *WorkflowDB { Phase: wf.Status.Phase, StartedAt: startT, FinishedAt: endT, - } - + }, nil } func (wdc *WorkflowDBContext) IsNodeStatusOffload() bool { @@ -71,14 +77,11 @@ func (wdc *WorkflowDBContext) Init(sess sqlbuilder.Database) { // Save will upset the workflow func (wdc *WorkflowDBContext) Save(wf *wfv1.Workflow) error { - if wdc != nil && wdc.Session == nil { return DBInvalidSession(nil, "DB session is not initialized") } - wfdb := convert(wf) - - err := wdc.update(wfdb) + wfdb, err := convert(wf) if err != nil { if errors.IsCode(CodeDBUpdateRowNotFound, err) { return wdc.insert(wfdb) @@ -88,8 +91,12 @@ func (wdc *WorkflowDBContext) Save(wf *wfv1.Workflow) error { } } - log.Info("Workflow update successfully into persistence") + err = wdc.update(wfdb) + if err != nil { + return err + } + log.Info("Workflow update successfully into persistence") return nil } @@ -97,10 +104,12 @@ func (wdc *WorkflowDBContext) insert(wfDB *WorkflowDB) error { if wdc.Session == nil { return DBInvalidSession(nil, "DB session is not initialized") } + tx, err := wdc.Session.NewTx(context.TODO()) if err != nil { return errors.InternalErrorf("Error in creating transaction. %v", err) } + defer func() { if tx != nil { err := tx.Close() @@ -109,14 +118,17 @@ func (wdc *WorkflowDBContext) insert(wfDB *WorkflowDB) error { } } }() + _, err = tx.Collection(wdc.TableName).Insert(wfDB) if err != nil { return errors.InternalErrorf("Error in inserting workflow in persistence. %v", err) } + err = tx.Commit() if err != nil { return errors.InternalErrorf("Error in Committing workflow insert in persistence. 
%v", err) } + return nil } @@ -124,11 +136,12 @@ func (wdc *WorkflowDBContext) update(wfDB *WorkflowDB) error { if wdc.Session == nil { return DBInvalidSession(nil, "DB session is not initialized") } - tx, err := wdc.Session.NewTx(context.TODO()) + tx, err := wdc.Session.NewTx(context.TODO()) if err != nil { return errors.InternalErrorf("Error in creating transaction. %v", err) } + defer func() { if tx != nil { err := tx.Close() @@ -137,6 +150,7 @@ func (wdc *WorkflowDBContext) update(wfDB *WorkflowDB) error { } } }() + err = tx.Collection(wdc.TableName).UpdateReturning(wfDB) if err != nil { if strings.Contains(err.Error(), "upper: no more rows in this result set") { @@ -149,26 +163,26 @@ func (wdc *WorkflowDBContext) update(wfDB *WorkflowDB) error { if err != nil { return errors.InternalErrorf("Error in Committing workflow update in persistence %v", err) } + return nil } func (wdc *WorkflowDBContext) Get(uid string) (*wfv1.Workflow, error) { - if wdc.Session == nil { - return nil, DBInvalidSession(nil, "DB session is not initiallized") + return nil, DBInvalidSession(nil, "DB session is not initialized") } - cond := db.Cond{"id":uid} + cond := db.Cond{"id": uid} wfs, err := wdc.Query(cond) - if err != nil { return nil, DBOperationError(err, "DB GET operation failed") } - if len(wfs) >0 { + if len(wfs) > 0 { return &wfs[0], nil } - return nil, DBOperationError(nil, "Row is not found") + + return nil, DBOperationError(nil, "Row is not found") } func (wdc *WorkflowDBContext) List(orderBy interface{}) (*wfv1.WorkflowList, error) { @@ -177,26 +191,26 @@ func (wdc *WorkflowDBContext) List(orderBy interface{}) (*wfv1.WorkflowList, err } wfs, err := wdc.Query(nil, orderBy) - if err != nil { return nil, err } - var wfList wfv1.WorkflowList - wfList.Items = wfs - return &wfList, nil + return &wfv1.WorkflowList{ + Items: wfs, + }, nil } - -func (wdc *WorkflowDBContext) Query(condition db.Cond, orderBy ...interface{} ) ([]wfv1.Workflow, error) { +func (wdc *WorkflowDBContext) Query(condition db.Cond, orderBy ...interface{}) ([]wfv1.Workflow, error) { var wfDBs []WorkflowDB if wdc.Session == nil { return nil, DBInvalidSession(nil, "DB session is not initialized") } - if err := wdc.Session.Collection(wdc.TableName).Find(condition).OrderBy(orderBy).All(&wfDBs); err != nil { - return nil, DBOperationError(err, "DB Query opeartion failed") + err := wdc.Session.Collection(wdc.TableName).Find(condition).OrderBy(orderBy).All(&wfDBs) + if err != nil { + return nil, DBOperationError(err, "DB Query operation failed") } + var wfs []wfv1.Workflow for _, wfDB := range wfDBs { var wf wfv1.Workflow @@ -217,15 +231,15 @@ func (wdc *WorkflowDBContext) Close() error { return wdc.Session.Close() } - -func (wdc *WorkflowDBContext) QueryWithPagination(condition db.Cond, pageLimit uint, lastId string, orderBy ...interface{} ) (*wfv1.WorkflowList, error) { +func (wdc *WorkflowDBContext) QueryWithPagination(condition db.Cond, pageLimit uint, lastId string, orderBy ...interface{}) (*wfv1.WorkflowList, error) { var wfDBs []WorkflowDB if wdc.Session == nil { return nil, DBInvalidSession(nil, "DB session is not initialized") } - if err := wdc.Session.Collection(wdc.TableName).Find(condition).OrderBy(orderBy).Paginate(pageLimit).NextPage(lastId).All(&wfDBs); err != nil { - return nil, DBOperationError(err, "DB Query opeartion failed") + err := wdc.Session.Collection(wdc.TableName).Find(condition).OrderBy(orderBy).Paginate(pageLimit).NextPage(lastId).All(&wfDBs) + if err != nil { + return nil, DBOperationError(err, "DB Query 
operation failed") } var wfs []wfv1.Workflow @@ -239,15 +253,14 @@ func (wdc *WorkflowDBContext) QueryWithPagination(condition db.Cond, pageLimit u } } - var wfList wfv1.WorkflowList - wfList.Items = wfs - - return &wfList, nil + return &wfv1.WorkflowList{ + Items: wfs, + }, nil } -func (wdc *WorkflowDBContext) Delete(condition db.Cond)(error){ +func (wdc *WorkflowDBContext) Delete(condition db.Cond) error { if wdc.Session == nil { return DBInvalidSession(nil, "DB session is not initialized") } return wdc.Session.Collection(wdc.TableName).Find(condition).Delete() -} \ No newline at end of file +} diff --git a/pkg/apis/workflow/v1alpha1/workflow_types.go b/pkg/apis/workflow/v1alpha1/workflow_types.go index d33c7505561a..dc266224919f 100644 --- a/pkg/apis/workflow/v1alpha1/workflow_types.go +++ b/pkg/apis/workflow/v1alpha1/workflow_types.go @@ -209,7 +209,6 @@ type WorkflowSpec struct { SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,21,opt,name=schedulerName"` // PodGC describes the strategy to use when to deleting completed pods - PodGC *PodGC `json:"podGC,omitempty" protobuf:"bytes,22,opt,name=podGC"` // PriorityClassName to apply to workflow pods. @@ -224,8 +223,7 @@ type WorkflowSpec struct { // SecurityContext holds pod-level security attributes and common container settings. // Optional: Defaults to empty. See type description for default values of each field. - // +optiona - + // +optional SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,26,opt,name=securityContext"` // PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of diff --git a/util/util.go b/util/util.go index 43c6ac77e273..fbf920f9e916 100644 --- a/util/util.go +++ b/util/util.go @@ -69,15 +69,10 @@ func GetClientConfig() clientcmd.ClientConfig { } func InitKubeClient() *rest.Config { - - var err error - var clientConfig clientcmd.ClientConfig - - clientConfig = GetClientConfig() + clientConfig := GetClientConfig() config, err := clientConfig.ClientConfig() if err != nil { - panic(err.Error()) + panic(err) } - return config -} \ No newline at end of file +} diff --git a/workflow/controller/operator.go b/workflow/controller/operator.go index 71b980bea22d..19a83a7e4111 100644 --- a/workflow/controller/operator.go +++ b/workflow/controller/operator.go @@ -1861,7 +1861,6 @@ func processItem(fstTmpl *fasttemplate.Template, name string, index int, item wf vals := make([]string, 0) for itemKey, itemVal := range item.MapVal { replaceMap[fmt.Sprintf("item.%s", itemKey)] = fmt.Sprintf("%v", itemVal) - } // sort the values so that the name is deterministic sort.Strings(vals) @@ -1920,13 +1919,11 @@ func expandSequence(seq *wfv1.Sequence) ([]wfv1.Item, error) { } if start <= end { for i := start; i <= end; i++ { - items = append(items, wfv1.Item{Type: wfv1.Number, StrVal: fmt.Sprintf(format, i)}) } } else { for i := start; i >= end; i-- { items = append(items, wfv1.Item{Type: wfv1.Number, StrVal: fmt.Sprintf(format, i)}) - } } return items, nil diff --git a/workflow/controller/workflowpod.go b/workflow/controller/workflowpod.go index 7cf14f0b5b1f..d60335f8e837 100644 --- a/workflow/controller/workflowpod.go +++ b/workflow/controller/workflowpod.go @@ -234,7 +234,7 @@ func (woc *wfOperationCtx) createWorkflowPod(nodeName string, mainCtr apiv1.Cont return nil, errors.Wrap(err, "", "Fail to marshal the Pod spec") } - tmpl.PodSpecPatch, err = util. 
PodSpecPatchMerge(woc.wf, tmpl) + tmpl.PodSpecPatch, err = util.PodSpecPatchMerge(woc.wf, tmpl) if err != nil { return nil, errors.Wrap(err, "", "Fail to marshal the Pod spec") diff --git a/workflow/validate/validate_test.go b/workflow/validate/validate_test.go index 98830bb88b96..8b7b6b175616 100644 --- a/workflow/validate/validate_test.go +++ b/workflow/validate/validate_test.go @@ -3,10 +3,10 @@ package validate import ( "testing" - "sigs.k8s.io/yaml" "github.com/stretchr/testify/assert" apierr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" fakewfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned/fake" @@ -302,7 +302,6 @@ func TestStepOutputReference(t *testing.T) { assert.Nil(t, err) } - var stepStatusReferences = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -341,7 +340,6 @@ func TestStepStatusReference(t *testing.T) { assert.Nil(t, err) } - var stepStatusReferencesNoFutureReference = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow @@ -975,7 +973,6 @@ func TestPriorityVariable(t *testing.T) { assert.Nil(t, err) } - var volumeMountArtifactPathCollision = ` apiVersion: argoproj.io/v1alpha1 kind: Workflow From 8f6972c9267b083942cd95c571f07318347fde15 Mon Sep 17 00:00:00 2001 From: Simon Behar Date: Fri, 22 Nov 2019 10:50:36 -0800 Subject: [PATCH 010/421] Fixed more format and dependency issues --- Gopkg.lock | 9 ++++ Gopkg.toml | 2 + api/openapi-spec/swagger.json | 2 + cmd/server/workflow/workflow.proto | 54 +++++++++---------- cmd/server/workflow/workflow.swagger.json | 6 ++- pkg/apis/workflow/v1alpha1/generated.proto | 4 ++ .../workflow/v1alpha1/openapi_generated.go | 6 ++- 7 files changed, 51 insertions(+), 32 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index c190ca2aea4d..38a2e161c803 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -302,6 +302,14 @@ revision = "0ca988a254f991240804bf9821f3450d87ccbb1b" version = "v1.3.0" +[[projects]] + branch = "master" + digest = "1:107b233e45174dbab5b1324201d092ea9448e58243ab9f039e4c0f332e121e3a" + name = "github.com/golang/glog" + packages = ["."] + pruneopts = "" + revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" + [[projects]] digest = "1:b852d2b62be24e445fcdbad9ce3015b44c207815d631230dfce3f14e7803f5bf" name = "github.com/golang/protobuf" @@ -1505,6 +1513,7 @@ "github.com/gogo/protobuf/gogoproto", "github.com/gogo/protobuf/proto", "github.com/gogo/protobuf/sortkeys", + "github.com/golang/glog", "github.com/golang/protobuf/proto", "github.com/gorilla/websocket", "github.com/grpc-ecosystem/grpc-gateway/runtime", diff --git a/Gopkg.toml b/Gopkg.toml index bd3036bf94d2..313c4472099e 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -3,6 +3,8 @@ required = [ "k8s.io/code-generator/cmd/deepcopy-gen", "k8s.io/code-generator/cmd/informer-gen", "k8s.io/code-generator/cmd/lister-gen", + "gonum.org/v1/gonum/graph", + "github.com/golang/glog", ] [[constraint]] diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 5771c3de0b98..743cc53ed187 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -1538,6 +1538,7 @@ "format": "int64" }, "podGC": { + "description": "PodGC describes the strategy to use when to deleting completed pods", "$ref": "#/definitions/io.argoproj.workflow.v1alpha1.PodGC" }, "podPriority": { @@ -1563,6 +1564,7 @@ "type": "string" }, "securityContext": { + "description": "SecurityContext holds pod-level security attributes and common container 
settings. Optional: Defaults to empty. See type description for default values of each field.", "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext" }, "serviceAccountName": { diff --git a/cmd/server/workflow/workflow.proto b/cmd/server/workflow/workflow.proto index b629868499b4..46c0ae0de491 100644 --- a/cmd/server/workflow/workflow.proto +++ b/cmd/server/workflow/workflow.proto @@ -13,44 +13,43 @@ import "k8s.io/api/core/v1/generated.proto"; // Workflow Service API performs CRUD actions against application resources package workflow; -message SubmitOptions{ +message SubmitOptions { string Name = 1; string GenerateName = 2; string InstanceID = 3; string Entrypoint = 4; repeated string Parameters = 5; - string ServiceAccount = 6; - bool ServerDryRun = 7; - string Labels = 8; + string ServiceAccount = 6; + bool ServerDryRun = 7; + string Labels = 8; k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference OwnerReference = 9; - } -message WorkflowCreateRequest{ +message WorkflowCreateRequest { string Namespace = 1; github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow Workflow = 2; k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions CreateOptions = 3; SubmitOptions SubmitOptions = 4; } -message WorkflowGetRequest{ +message WorkflowGetRequest { string WorkflowName = 1; string Namespace = 2; - k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions GetOptions =3; + k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions GetOptions = 3; } -message WorkflowListRequest{ +message WorkflowListRequest { string Namespace = 1; - k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions ListOptions =2; + k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions ListOptions = 2; } -message WorkflowUpdateRequest{ +message WorkflowUpdateRequest { string WorkflowName = 1; string Namespace = 2; bool Memoized = 3; } -message WorkflowLogRequest{ +message WorkflowLogRequest { string WorkflowName = 1; string Namespace = 2; string PodName = 3; @@ -59,12 +58,12 @@ message WorkflowLogRequest{ } -message WorkflowDeleteRequest{ +message WorkflowDeleteRequest { string WorkflowName = 1; string Namespace = 2; - k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions DeleteOptions =3; + k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions DeleteOptions = 3; } -message WorkflowDeleteResponse{ +message WorkflowDeleteResponse { string WorkflowName = 1; string Status = 2; } @@ -74,63 +73,62 @@ message LogEntry { k8s.io.apimachinery.pkg.apis.meta.v1.Time timeStamp = 2; } - service WorkflowService { - rpc Create(WorkflowCreateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Create (WorkflowCreateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http) = { post: "/api/v1/workflows" body: "*" }; } - rpc Get(WorkflowGetRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Get (WorkflowGetRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http).get = "/api/v1/workflows/{Namespace}/{WorkflowName}"; } - rpc List(WorkflowListRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowList){ + rpc List (WorkflowListRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowList) { option (google.api.http).get = "/api/v1/workflows/{Namespace}"; } - rpc Delete(WorkflowDeleteRequest) returns (WorkflowDeleteResponse){ + rpc Delete (WorkflowDeleteRequest) returns (WorkflowDeleteResponse) { option (google.api.http).delete = 
"/api/v1/workflows/{Namespace}/{WorkflowName}"; } - rpc Retry(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Retry (WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http) = { put: "/api/v1/workflows/{Namespace}/{WorkflowName}/retry" body: "*" }; } - rpc Resubmit(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Resubmit (WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http) = { put: "/api/v1/workflows/{Namespace}/{WorkflowName}/resubmit" body: "*" }; } - rpc Resume(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Resume (WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http) = { put: "/api/v1/workflows/{Namespace}/{WorkflowName}/resume" body: "*" }; } - rpc Suspend(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Suspend (WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http) = { put: "/api/v1/workflows/{Namespace}/{WorkflowName}/suspend" body: "*" }; } - rpc Terminate(WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Terminate (WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http) = { put: "/api/v1/workflows/{Namespace}/{WorkflowName}/terminate" body: "*" }; } - rpc Lint(WorkflowCreateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow){ + rpc Lint (WorkflowCreateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http) = { post: "/api/v1/workflows/lint" body: "*" @@ -138,12 +136,12 @@ service WorkflowService { } // PodLogs returns stream of log entries for the specified pod. Pod - rpc PodLogs(WorkflowLogRequest) returns (stream LogEntry) { + rpc PodLogs (WorkflowLogRequest) returns (stream LogEntry) { option (google.api.http).get = "/api/v1/workflow/{Namespace}/{WorkflowName}/pods/{PodName}/logs"; } // Watch returns stream of application change events. 
- rpc Watch(WorkflowGetRequest) returns (stream github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { + rpc Watch (WorkflowGetRequest) returns (stream github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http).get = "/api/v1/stream/workflows/{Namespace}/{WorkflowName}"; } } \ No newline at end of file diff --git a/cmd/server/workflow/workflow.swagger.json b/cmd/server/workflow/workflow.swagger.json index aa81113916fa..a262090e7df9 100644 --- a/cmd/server/workflow/workflow.swagger.json +++ b/cmd/server/workflow/workflow.swagger.json @@ -4198,7 +4198,8 @@ "title": "Set scheduler name for all pods.\nWill be overridden if container/script template's scheduler name is set.\nDefault scheduler will be used if neither specified.\n+optional" }, "podGC": { - "$ref": "#/definitions/v1alpha1PodGC" + "$ref": "#/definitions/v1alpha1PodGC", + "title": "PodGC describes the strategy to use when to deleting completed pods" }, "podPriorityClassName": { "type": "string", @@ -4217,7 +4218,8 @@ "title": "+patchStrategy=merge\n+patchMergeKey=ip" }, "securityContext": { - "$ref": "#/definitions/v1PodSecurityContext" + "$ref": "#/definitions/v1PodSecurityContext", + "title": "SecurityContext holds pod-level security attributes and common container settings.\nOptional: Defaults to empty. See type description for default values of each field.\n+optional" }, "podSpecPatch": { "type": "string", diff --git a/pkg/apis/workflow/v1alpha1/generated.proto b/pkg/apis/workflow/v1alpha1/generated.proto index 8b0f4e4cb585..78ef0ac0291f 100644 --- a/pkg/apis/workflow/v1alpha1/generated.proto +++ b/pkg/apis/workflow/v1alpha1/generated.proto @@ -847,6 +847,7 @@ message WorkflowSpec { // +optional optional string schedulerName = 21; + // PodGC describes the strategy to use when to deleting completed pods optional PodGC podGC = 22; // PriorityClassName to apply to workflow pods. @@ -859,6 +860,9 @@ message WorkflowSpec { // +patchMergeKey=ip repeated k8s.io.api.core.v1.HostAlias hostAliases = 25; + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional optional k8s.io.api.core.v1.PodSecurityContext securityContext = 26; // PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of diff --git a/pkg/apis/workflow/v1alpha1/openapi_generated.go b/pkg/apis/workflow/v1alpha1/openapi_generated.go index 8cffc24b732a..407fd7f08563 100644 --- a/pkg/apis/workflow/v1alpha1/openapi_generated.go +++ b/pkg/apis/workflow/v1alpha1/openapi_generated.go @@ -2880,7 +2880,8 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback }, "podGC": { SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.PodGC"), + Description: "PodGC describes the strategy to use when to deleting completed pods", + Ref: ref("github.com/argoproj/argo/pkg/apis/workflow/v1alpha1.PodGC"), }, }, "podPriorityClassName": { @@ -2917,7 +2918,8 @@ func schema_pkg_apis_workflow_v1alpha1_WorkflowSpec(ref common.ReferenceCallback }, "securityContext": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), + Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. 
See type description for default values of each field.", + Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), }, }, "podSpecPatch": { From 81970f90e72692e796c3ca164b4d33ad29820f57 Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian Date: Tue, 10 Dec 2019 09:25:10 -0800 Subject: [PATCH 011/421] Added Workflow Template API --- Gopkg.lock | 9 + cmd/server/apiserver/argoserver.go | 6 +- cmd/server/{workflow => common}/common.go | 2 +- cmd/server/workflow/workflow.pb.go | 202 +- cmd/server/workflow/workflow.pb.gw.go | 240 +- cmd/server/workflow/workflow.proto | 80 +- cmd/server/workflow/workflow.swagger.json | 128 +- cmd/server/workflow/workflow_server.go | 9 +- cmd/server/workflow/workflow_service.go | 9 +- .../workflowtemplate/workflow-template.pb.go | 2327 +++++++++++ .../workflow-template.pb.gw.go | 693 ++++ .../workflowtemplate/workflow-template.proto | 82 + .../workflow-template.swagger.json | 3571 +++++++++++++++++ .../workflow_template_server.go | 182 + 14 files changed, 7205 insertions(+), 335 deletions(-) rename cmd/server/{workflow => common}/common.go (99%) create mode 100644 cmd/server/workflowtemplate/workflow-template.pb.go create mode 100644 cmd/server/workflowtemplate/workflow-template.pb.gw.go create mode 100644 cmd/server/workflowtemplate/workflow-template.proto create mode 100644 cmd/server/workflowtemplate/workflow-template.swagger.json create mode 100644 cmd/server/workflowtemplate/workflow_template_server.go diff --git a/Gopkg.lock b/Gopkg.lock index 38a2e161c803..2d1a2459ec15 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -912,6 +912,14 @@ pruneopts = "" revision = "be0da057c5e3c2df569a2c25cd280149b7d7e7d0" +[[projects]] + digest = "1:c9adfdd54f2576a0453e082193916f72c45e29791a809ef366e183cd3bac65d4" + name = "gonum.org/v1/gonum" + packages = ["graph"] + pruneopts = "" + revision = "ef62c3b63e85fdc2902c339d3ca49d34546d21f4" + version = "v0.6.1" + [[projects]] digest = "1:ea4822af073aae8c1f0868534aa49e2e97c1cd4fcad76f0ef91427ce26428923" name = "google.golang.org/api" @@ -1533,6 +1541,7 @@ "github.com/valyala/fasttemplate", "golang.org/x/crypto/ssh", "golang.org/x/net/context", + "gonum.org/v1/gonum/graph", "google.golang.org/genproto/googleapis/api/annotations", "google.golang.org/grpc", "google.golang.org/grpc/codes", diff --git a/cmd/server/apiserver/argoserver.go b/cmd/server/apiserver/argoserver.go index 01de1fe21311..ff0df53fabcd 100644 --- a/cmd/server/apiserver/argoserver.go +++ b/cmd/server/apiserver/argoserver.go @@ -3,6 +3,7 @@ package apiserver import ( "crypto/tls" "github.com/argoproj/argo/cmd/server/workflow" + "github.com/argoproj/argo/cmd/server/workflowtemplate" "github.com/argoproj/argo/errors" "github.com/argoproj/argo/pkg/apiclient" "github.com/argoproj/argo/pkg/client/clientset/versioned" @@ -163,6 +164,9 @@ func (as *ArgoServer) newGRPCServer() *grpc.Server { workflowServer := workflow.NewWorkflowServer(as.Namespace, as.WfClientSet, as.KubeClientset, configMap, as.EnableClientAuth) workflow.RegisterWorkflowServiceServer(grpcServer, workflowServer) + workflowTemplateServer := workflowtemplate.NewWorkflowTemplateServer(as.Namespace, as.WfClientSet, as.KubeClientset, configMap, as.EnableClientAuth) + workflowtemplate.RegisterWorkflowTemplateServiceServer(grpcServer, workflowTemplateServer) + return grpcServer } @@ -192,7 +196,7 @@ func (a *ArgoServer) newHTTPServer(ctx context.Context, port int) *http.Server { gwCookieOpts := runtime.WithForwardResponseOption(a.translateGrpcCookieHeader) gwmux := runtime.NewServeMux(gwMuxOpts, gwCookieOpts) 
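	// For illustration: each generated Register*HandlerFromEndpoint call below
	// translates the google.api.http annotations into REST routes on gwmux and
	// proxies them to the gRPC endpoint. A minimal sketch of the helper's
	// assumed shape (names and signature are assumptions, not shown in this
	// patch):
	//
	//   type registerFunc func(ctx context.Context, mux *runtime.ServeMux,
	//   	endpoint string, opts []grpc.DialOption) error
	//
	//   func mustRegisterGWHandler(register registerFunc, ctx context.Context,
	//   	mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) {
	//   	if err := register(ctx, mux, endpoint, opts); err != nil {
	//   		panic(err)
	//   	}
	//   }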
mustRegisterGWHandler(workflow.RegisterWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) - + mustRegisterGWHandler(workflowtemplate.RegisterWorkflowTemplateServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) mux.Handle("/api/", gwmux) return &httpServer } diff --git a/cmd/server/workflow/common.go b/cmd/server/common/common.go similarity index 99% rename from cmd/server/workflow/common.go rename to cmd/server/common/common.go index 7a45379665e5..8a670051d7ce 100644 --- a/cmd/server/workflow/common.go +++ b/cmd/server/common/common.go @@ -1,4 +1,4 @@ -package workflow +package common import ( "k8s.io/client-go/rest" diff --git a/cmd/server/workflow/workflow.pb.go b/cmd/server/workflow/workflow.pb.go index e36ce8dad6d9..efe424f5a432 100644 --- a/cmd/server/workflow/workflow.pb.go +++ b/cmd/server/workflow/workflow.pb.go @@ -36,15 +36,15 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type SubmitOptions struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - GenerateName string `protobuf:"bytes,2,opt,name=GenerateName,proto3" json:"GenerateName,omitempty"` - InstanceID string `protobuf:"bytes,3,opt,name=InstanceID,proto3" json:"InstanceID,omitempty"` - Entrypoint string `protobuf:"bytes,4,opt,name=Entrypoint,proto3" json:"Entrypoint,omitempty"` - Parameters []string `protobuf:"bytes,5,rep,name=Parameters,proto3" json:"Parameters,omitempty"` - ServiceAccount string `protobuf:"bytes,6,opt,name=ServiceAccount,proto3" json:"ServiceAccount,omitempty"` - ServerDryRun bool `protobuf:"varint,7,opt,name=ServerDryRun,proto3" json:"ServerDryRun,omitempty"` - Labels string `protobuf:"bytes,8,opt,name=Labels,proto3" json:"Labels,omitempty"` - OwnerReference *v1.OwnerReference `protobuf:"bytes,9,opt,name=OwnerReference,proto3" json:"OwnerReference,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + GenerateName string `protobuf:"bytes,2,opt,name=generateName,proto3" json:"generateName,omitempty"` + InstanceID string `protobuf:"bytes,3,opt,name=instanceID,proto3" json:"instanceID,omitempty"` + Entrypoint string `protobuf:"bytes,4,opt,name=entrypoint,proto3" json:"entrypoint,omitempty"` + Parameters []string `protobuf:"bytes,5,rep,name=parameters,proto3" json:"parameters,omitempty"` + ServiceAccount string `protobuf:"bytes,6,opt,name=serviceAccount,proto3" json:"serviceAccount,omitempty"` + ServerDryRun bool `protobuf:"varint,7,opt,name=serverDryRun,proto3" json:"serverDryRun,omitempty"` + Labels string `protobuf:"bytes,8,opt,name=labels,proto3" json:"labels,omitempty"` + OwnerReference *v1.OwnerReference `protobuf:"bytes,9,opt,name=ownerReference,proto3" json:"ownerReference,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -147,10 +147,10 @@ func (m *SubmitOptions) GetOwnerReference() *v1.OwnerReference { } type WorkflowCreateRequest struct { - Namespace string `protobuf:"bytes,1,opt,name=Namespace,proto3" json:"Namespace,omitempty"` - Workflow *v1alpha1.Workflow `protobuf:"bytes,2,opt,name=Workflow,proto3" json:"Workflow,omitempty"` - CreateOptions *v1.CreateOptions `protobuf:"bytes,3,opt,name=CreateOptions,proto3" json:"CreateOptions,omitempty"` - SubmitOptions *SubmitOptions `protobuf:"bytes,4,opt,name=SubmitOptions,proto3" json:"SubmitOptions,omitempty"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Workflow *v1alpha1.Workflow 
`protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + CreateOptions *v1.CreateOptions `protobuf:"bytes,3,opt,name=createOptions,proto3" json:"createOptions,omitempty"` + SubmitOptions *SubmitOptions `protobuf:"bytes,4,opt,name=submitOptions,proto3" json:"submitOptions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -218,9 +218,9 @@ func (m *WorkflowCreateRequest) GetSubmitOptions() *SubmitOptions { } type WorkflowGetRequest struct { - WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` - GetOptions *v1.GetOptions `protobuf:"bytes,3,opt,name=GetOptions,proto3" json:"GetOptions,omitempty"` + WorkflowName string `protobuf:"bytes,1,opt,name=workflowName,proto3" json:"workflowName,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + GetOptions *v1.GetOptions `protobuf:"bytes,3,opt,name=getOptions,proto3" json:"getOptions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -281,8 +281,8 @@ func (m *WorkflowGetRequest) GetGetOptions() *v1.GetOptions { } type WorkflowListRequest struct { - Namespace string `protobuf:"bytes,1,opt,name=Namespace,proto3" json:"Namespace,omitempty"` - ListOptions *v1.ListOptions `protobuf:"bytes,2,opt,name=ListOptions,proto3" json:"ListOptions,omitempty"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + ListOptions *v1.ListOptions `protobuf:"bytes,2,opt,name=listOptions,proto3" json:"listOptions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -336,9 +336,9 @@ func (m *WorkflowListRequest) GetListOptions() *v1.ListOptions { } type WorkflowUpdateRequest struct { - WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` - Memoized bool `protobuf:"varint,3,opt,name=Memoized,proto3" json:"Memoized,omitempty"` + WorkflowName string `protobuf:"bytes,1,opt,name=workflowName,proto3" json:"workflowName,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Memoized bool `protobuf:"varint,3,opt,name=memoized,proto3" json:"memoized,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -399,10 +399,10 @@ func (m *WorkflowUpdateRequest) GetMemoized() bool { } type WorkflowLogRequest struct { - WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` - PodName string `protobuf:"bytes,3,opt,name=PodName,proto3" json:"PodName,omitempty"` - Container string `protobuf:"bytes,4,opt,name=Container,proto3" json:"Container,omitempty"` + WorkflowName string `protobuf:"bytes,1,opt,name=workflowName,proto3" json:"workflowName,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + PodName string `protobuf:"bytes,3,opt,name=podName,proto3" json:"podName,omitempty"` + Container string `protobuf:"bytes,4,opt,name=container,proto3" json:"container,omitempty"` LogOptions *v11.PodLogOptions 
`protobuf:"bytes,5,opt,name=logOptions,proto3" json:"logOptions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -478,9 +478,9 @@ func (m *WorkflowLogRequest) GetLogOptions() *v11.PodLogOptions { } type WorkflowDeleteRequest struct { - WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=Namespace,proto3" json:"Namespace,omitempty"` - DeleteOptions *v1.DeleteOptions `protobuf:"bytes,3,opt,name=DeleteOptions,proto3" json:"DeleteOptions,omitempty"` + WorkflowName string `protobuf:"bytes,1,opt,name=workflowName,proto3" json:"workflowName,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + DeleteOptions *v1.DeleteOptions `protobuf:"bytes,3,opt,name=deleteOptions,proto3" json:"deleteOptions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -541,8 +541,8 @@ func (m *WorkflowDeleteRequest) GetDeleteOptions() *v1.DeleteOptions { } type WorkflowDeleteResponse struct { - WorkflowName string `protobuf:"bytes,1,opt,name=WorkflowName,proto3" json:"WorkflowName,omitempty"` - Status string `protobuf:"bytes,2,opt,name=Status,proto3" json:"Status,omitempty"` + WorkflowName string `protobuf:"bytes,1,opt,name=workflowName,proto3" json:"workflowName,omitempty"` + Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -665,77 +665,77 @@ func init() { func init() { proto.RegisterFile("cmd/server/workflow/workflow.proto", fileDescriptor_192bc67c39cca05a) } var fileDescriptor_192bc67c39cca05a = []byte{ - // 1111 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x97, 0xcf, 0x6e, 0xe4, 0xc4, - 0x13, 0xc7, 0xd5, 0xf9, 0x33, 0x99, 0x74, 0x36, 0xf9, 0xe9, 0xd7, 0x40, 0x18, 0x99, 0x6c, 0x36, - 0x6b, 0x09, 0x14, 0x45, 0x2b, 0x3b, 0x99, 0x64, 0x61, 0xc9, 0x2a, 0x2c, 0x21, 0x41, 0x61, 0xa5, - 0x81, 0x8d, 0x3c, 0x41, 0xab, 0x20, 0x2e, 0x1d, 0x4f, 0xad, 0x63, 0x32, 0xee, 0x36, 0xee, 0x9e, - 0x89, 0x42, 0x94, 0x03, 0x1c, 0x56, 0x70, 0xe2, 0xc0, 0x85, 0x3b, 0x12, 0x5a, 0xad, 0x10, 0x27, - 0x1e, 0x82, 0x23, 0x88, 0x17, 0x40, 0x11, 0x12, 0xe2, 0x2d, 0x50, 0xb7, 0xed, 0xb1, 0x3d, 0x99, - 0xac, 0x66, 0x48, 0xe6, 0xe6, 0xae, 0x72, 0x55, 0x7f, 0xfc, 0xad, 0x72, 0xb5, 0x8d, 0x4d, 0x37, - 0x68, 0xd8, 0x02, 0xa2, 0x36, 0x44, 0xf6, 0x31, 0x8f, 0x8e, 0x9e, 0x34, 0xf9, 0x71, 0xe7, 0xc2, - 0x0a, 0x23, 0x2e, 0x39, 0x29, 0xa7, 0x6b, 0xe3, 0x65, 0x8f, 0x7b, 0x5c, 0x1b, 0x6d, 0x75, 0x15, - 0xfb, 0x8d, 0x39, 0x8f, 0x73, 0xaf, 0x09, 0x36, 0x0d, 0x7d, 0x9b, 0x32, 0xc6, 0x25, 0x95, 0x3e, - 0x67, 0x22, 0xf1, 0xae, 0x1d, 0xdd, 0x13, 0x96, 0xcf, 0x95, 0x37, 0xa0, 0xee, 0xa1, 0xcf, 0x20, - 0x3a, 0xb1, 0xc3, 0x23, 0x4f, 0x19, 0x84, 0x1d, 0x80, 0xa4, 0x76, 0x7b, 0xc5, 0xf6, 0x80, 0x41, - 0x44, 0x25, 0x34, 0x92, 0xa8, 0x2d, 0xcf, 0x97, 0x87, 0xad, 0x03, 0xcb, 0xe5, 0x81, 0x4d, 0x23, - 0xbd, 0xe9, 0x67, 0xfa, 0x22, 0x0b, 0xed, 0xe0, 0xb6, 0x57, 0x68, 0x33, 0x3c, 0xa4, 0x17, 0x93, - 0x98, 0xd9, 0xd6, 0xb6, 0xcb, 0x23, 0xe8, 0xb1, 0x91, 0xf9, 0xcf, 0x08, 0x9e, 0xae, 0xb7, 0x0e, - 0x02, 0x5f, 0x3e, 0x0a, 0x35, 0x36, 0x21, 0x78, 0xec, 0x23, 0x1a, 0x40, 0x05, 0x2d, 0xa0, 0xc5, - 0x49, 0x47, 0x5f, 0x13, 0x13, 0xdf, 0xd8, 0x49, 0x02, 0xb5, 0x6f, 0x44, 0xfb, 0x0a, 0x36, 0x32, - 0x8f, 0xf1, 0x43, 0x26, 0x24, 0x65, 
0x2e, 0x3c, 0xdc, 0xae, 0x8c, 0xea, 0x3b, 0x72, 0x16, 0xe5, - 0x7f, 0x9f, 0xc9, 0xe8, 0x24, 0xe4, 0x3e, 0x93, 0x95, 0xb1, 0xd8, 0x9f, 0x59, 0x94, 0x7f, 0x97, - 0x46, 0x34, 0x00, 0x09, 0x91, 0xa8, 0x8c, 0x2f, 0x8c, 0x2a, 0x7f, 0x66, 0x21, 0x6f, 0xe0, 0x99, - 0x3a, 0x44, 0x6d, 0xdf, 0x85, 0x4d, 0xd7, 0xe5, 0x2d, 0x26, 0x2b, 0x25, 0x9d, 0xa3, 0xcb, 0xaa, - 0x58, 0xeb, 0xba, 0xa0, 0xdb, 0xd1, 0x89, 0xd3, 0x62, 0x95, 0x89, 0x05, 0xb4, 0x58, 0x76, 0x0a, - 0x36, 0x32, 0x8b, 0x4b, 0x35, 0x7a, 0x00, 0x4d, 0x51, 0x29, 0xeb, 0x1c, 0xc9, 0x8a, 0x7c, 0x8a, - 0x67, 0x1e, 0x1d, 0x33, 0x88, 0x1c, 0x78, 0x02, 0x11, 0x30, 0x17, 0x2a, 0x93, 0x0b, 0x68, 0x71, - 0xaa, 0xba, 0x66, 0xc5, 0x52, 0x5a, 0xf9, 0x2a, 0x5a, 0xe1, 0x91, 0xa7, 0x0c, 0xc2, 0x52, 0x55, - 0xb4, 0xda, 0x2b, 0x56, 0x31, 0xd6, 0xe9, 0xca, 0x65, 0x3e, 0x1f, 0xc1, 0xaf, 0x3c, 0x4e, 0xaa, - 0xb6, 0x15, 0x01, 0x95, 0xe0, 0xc0, 0xe7, 0x2d, 0x10, 0x92, 0xcc, 0xe1, 0x49, 0xa5, 0xa1, 0x08, - 0xa9, 0x9b, 0x0a, 0x9f, 0x19, 0xc8, 0x3e, 0x2e, 0xa7, 0x61, 0x5a, 0xf9, 0xa9, 0xea, 0x86, 0x95, - 0xf5, 0x87, 0x95, 0xf6, 0x87, 0xbe, 0xc8, 0xa0, 0x3a, 0x5d, 0x9c, 0xf6, 0x87, 0x95, 0x26, 0x71, - 0x3a, 0xe9, 0xc8, 0x3e, 0x9e, 0x8e, 0x49, 0x92, 0xea, 0xeb, 0xba, 0x4d, 0x55, 0x57, 0xfb, 0x7b, - 0xde, 0x42, 0xa8, 0x53, 0xcc, 0x44, 0x36, 0xba, 0x1a, 0x4b, 0x97, 0x7c, 0xaa, 0xfa, 0x6a, 0x06, - 0x56, 0x70, 0x3b, 0xc5, 0xbb, 0xcd, 0x67, 0x08, 0x93, 0x14, 0x73, 0x07, 0x64, 0xaa, 0x94, 0x89, - 0x6f, 0xa4, 0xd6, 0x5c, 0x97, 0x16, 0x6c, 0x45, 0x35, 0x47, 0xba, 0xd5, 0xdc, 0xc5, 0x78, 0x07, - 0x64, 0xf1, 0x79, 0x97, 0xfb, 0x7b, 0xde, 0x2c, 0xce, 0xc9, 0xe5, 0x30, 0xbf, 0x46, 0xf8, 0xa5, - 0x14, 0xa0, 0xe6, 0x0b, 0xd9, 0x5f, 0x55, 0xeb, 0x78, 0x4a, 0xdd, 0x9c, 0x82, 0xc4, 0x85, 0x5d, - 0xe9, 0x0f, 0x24, 0x17, 0xe8, 0xe4, 0xb3, 0x98, 0xad, 0xac, 0xc3, 0x3e, 0x0e, 0x1b, 0xb9, 0x0e, - 0xbb, 0xba, 0x6e, 0x06, 0x2e, 0x7f, 0x08, 0x01, 0xf7, 0xbf, 0x80, 0x86, 0x56, 0xad, 0xec, 0x74, - 0xd6, 0xe6, 0xef, 0xb9, 0x62, 0xd5, 0xb8, 0x77, 0x7d, 0x9b, 0x56, 0xf0, 0xc4, 0x2e, 0x6f, 0xe8, - 0xe0, 0x78, 0xa2, 0xa4, 0x4b, 0x15, 0xb7, 0xc5, 0x99, 0xa4, 0x4a, 0xa1, 0x64, 0x9a, 0x64, 0x06, - 0xb2, 0x89, 0x71, 0x93, 0x7b, 0xa9, 0xb6, 0xe3, 0x5a, 0xdb, 0xdb, 0x39, 0x6d, 0x2d, 0x35, 0x0f, - 0x95, 0x92, 0xbb, 0xbc, 0x51, 0xeb, 0xdc, 0xe8, 0xe4, 0x82, 0xcc, 0x5f, 0x50, 0xa6, 0xe5, 0x36, - 0x34, 0xe1, 0x3a, 0xb5, 0xdc, 0xc7, 0xd3, 0x71, 0xca, 0xff, 0xf4, 0xda, 0x15, 0x42, 0x9d, 0x62, - 0x26, 0x73, 0x0f, 0xcf, 0x76, 0x53, 0x8b, 0x90, 0x33, 0x01, 0x7d, 0x61, 0xcf, 0xe2, 0x52, 0x5d, - 0x52, 0xd9, 0x12, 0x09, 0x73, 0xb2, 0x32, 0x19, 0x2e, 0xd7, 0xb8, 0xa7, 0xa7, 0xb5, 0xaa, 0x89, - 0xcb, 0x99, 0x04, 0x26, 0x93, 0x14, 0xe9, 0x92, 0x7c, 0x80, 0x27, 0xa5, 0x1f, 0x40, 0x5d, 0xd2, - 0x20, 0x4c, 0x1a, 0x7a, 0xa9, 0xbf, 0x47, 0xda, 0xf3, 0x03, 0x70, 0xb2, 0xe0, 0xea, 0xdf, 0x33, - 0xf8, 0x7f, 0x29, 0x58, 0x32, 0xdf, 0xc9, 0x53, 0x84, 0x4b, 0xf1, 0x88, 0x21, 0xb7, 0xb2, 0x21, - 0xd2, 0x73, 0xa0, 0x1a, 0x57, 0x1b, 0x90, 0xe6, 0xdc, 0x57, 0x7f, 0xfc, 0xf5, 0xdd, 0xc8, 0xac, - 0xf9, 0x7f, 0x7d, 0x76, 0xb6, 0x57, 0x3a, 0x87, 0xad, 0x58, 0x47, 0x4b, 0xe4, 0x7b, 0x84, 0x47, - 0x77, 0x40, 0x92, 0xb9, 0x8b, 0x14, 0xd9, 0xa4, 0xba, 0x2a, 0xc2, 0x9a, 0x46, 0xb0, 0xc8, 0x9d, - 0x0b, 0x08, 0xf6, 0x69, 0xa7, 0x91, 0xce, 0xec, 0xd3, 0x7c, 0xf9, 0xce, 0xc8, 0xb7, 0x08, 0x8f, - 0xa9, 0x79, 0x40, 0x6e, 0x5e, 0x64, 0xcb, 0x8d, 0x26, 0x63, 0xf3, 0x4a, 0x70, 0x2a, 0x93, 0xf9, - 0xba, 0x06, 0xbc, 0x45, 0x6e, 0xbe, 0x10, 0x90, 0x7c, 0x89, 0x70, 0x29, 0x6e, 0xc4, 0x5e, 0x55, - 0x2b, 0xbc, 0x58, 0xc6, 0xc2, 0xe5, 0x37, 0xc4, 0x3d, 0x9c, 
0xaa, 0xb2, 0x34, 0x98, 0x2a, 0x3f, - 0x22, 0x3c, 0xee, 0x80, 0xea, 0xdd, 0x1e, 0x08, 0x85, 0x39, 0x79, 0xd5, 0xaa, 0x6d, 0x68, 0xbe, - 0xb7, 0x8c, 0xea, 0x20, 0x7c, 0x76, 0xa4, 0xd8, 0x54, 0x67, 0xfd, 0x84, 0x70, 0xd9, 0x01, 0xa1, - 0x0f, 0xc2, 0xa1, 0xb3, 0xbe, 0xab, 0x59, 0xd7, 0x8d, 0xbb, 0x03, 0xb2, 0xc6, 0x78, 0x0a, 0xf7, - 0x19, 0xc2, 0x25, 0x85, 0x1b, 0xc0, 0xd0, 0x61, 0xdf, 0xd1, 0xb0, 0xf7, 0x8c, 0xd5, 0x81, 0x61, - 0x03, 0x50, 0xa8, 0xcf, 0x11, 0x9e, 0xa8, 0xb7, 0x44, 0x08, 0xac, 0x31, 0x74, 0xd6, 0x07, 0x9a, - 0xf5, 0x6d, 0x63, 0x6d, 0x20, 0x56, 0x11, 0xd3, 0x29, 0xd8, 0x9f, 0x11, 0x9e, 0xdc, 0x83, 0x28, - 0xf0, 0xd9, 0x25, 0xc3, 0xee, 0x5a, 0x71, 0x37, 0x35, 0xee, 0x7d, 0xe3, 0xcd, 0x81, 0x70, 0x65, - 0xca, 0xa7, 0x80, 0xbf, 0xd1, 0x63, 0x87, 0xc9, 0xa1, 0x0f, 0xe6, 0xdb, 0x9a, 0xf5, 0x35, 0x73, - 0xf6, 0x22, 0x6b, 0xd3, 0x67, 0xba, 0x29, 0x9f, 0x22, 0xfd, 0xcd, 0x50, 0xe3, 0x9e, 0xe8, 0x35, - 0xa1, 0xb3, 0xcf, 0x13, 0x83, 0x64, 0xde, 0xf4, 0x70, 0x33, 0x77, 0xf4, 0x06, 0x9b, 0xe4, 0x41, - 0xf7, 0x06, 0x2f, 0xd2, 0x22, 0xe4, 0x0d, 0x61, 0x9f, 0x26, 0x5f, 0x26, 0x67, 0x76, 0x93, 0x7b, - 0x62, 0x19, 0x91, 0x1f, 0x10, 0x1e, 0x7f, 0x4c, 0xa5, 0x7b, 0x38, 0xdc, 0x83, 0xe2, 0xbe, 0x26, - 0xbe, 0x4b, 0x3a, 0x6f, 0x86, 0x90, 0x11, 0xd0, 0xa0, 0xaf, 0x2a, 0x2e, 0xa3, 0xf7, 0xd6, 0x7f, - 0x3d, 0x9f, 0x47, 0xbf, 0x9d, 0xcf, 0xa3, 0x3f, 0xcf, 0xe7, 0xd1, 0x27, 0x77, 0x2e, 0xfd, 0xef, - 0xec, 0xf1, 0xa3, 0x7c, 0x50, 0xd2, 0xff, 0x90, 0xab, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x9f, - 0x69, 0xa8, 0xf0, 0x46, 0x0f, 0x00, 0x00, + // 1112 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x97, 0xcf, 0x4f, 0x24, 0x45, + 0x14, 0xc7, 0x53, 0x2c, 0x0c, 0xc3, 0x63, 0xc1, 0x58, 0x2a, 0x4e, 0x5a, 0x96, 0x65, 0x3b, 0xd1, + 0x10, 0xb2, 0xe9, 0x86, 0x81, 0xd5, 0x95, 0x0d, 0xae, 0x08, 0x06, 0x4d, 0xc8, 0xee, 0xa6, 0xc1, + 0x6c, 0x30, 0x5e, 0x9a, 0x9e, 0xb7, 0x4d, 0xcb, 0x74, 0x55, 0xdb, 0x55, 0x03, 0x41, 0xc2, 0x41, + 0x0f, 0x1b, 0x3d, 0x79, 0xf0, 0xe2, 0xdd, 0xc4, 0x6c, 0x36, 0xc6, 0x93, 0x7f, 0x84, 0x47, 0x8d, + 0xff, 0x80, 0x21, 0x26, 0xc6, 0xff, 0xc2, 0x54, 0xf5, 0x6f, 0x18, 0x36, 0x33, 0x02, 0xb7, 0xae, + 0xf7, 0xfa, 0xbd, 0xfa, 0xd4, 0xf7, 0xbd, 0x79, 0x35, 0x0d, 0xa6, 0x17, 0xb6, 0x6c, 0x81, 0xf1, + 0x3e, 0xc6, 0xf6, 0x01, 0x8f, 0xf7, 0x9e, 0xb4, 0xf9, 0x41, 0xfe, 0x60, 0x45, 0x31, 0x97, 0x9c, + 0xd6, 0xb3, 0xb5, 0xf1, 0xaa, 0xcf, 0x7d, 0xae, 0x8d, 0xb6, 0x7a, 0x4a, 0xfc, 0xc6, 0xa4, 0xcf, + 0xb9, 0xdf, 0x46, 0xdb, 0x8d, 0x02, 0xdb, 0x65, 0x8c, 0x4b, 0x57, 0x06, 0x9c, 0x89, 0xd4, 0xbb, + 0xb8, 0x77, 0x57, 0x58, 0x01, 0x57, 0xde, 0xd0, 0xf5, 0x76, 0x03, 0x86, 0xf1, 0xa1, 0x1d, 0xed, + 0xf9, 0xca, 0x20, 0xec, 0x10, 0xa5, 0x6b, 0xef, 0xcf, 0xdb, 0x3e, 0x32, 0x8c, 0x5d, 0x89, 0xad, + 0x34, 0x6a, 0xd5, 0x0f, 0xe4, 0x6e, 0x67, 0xc7, 0xf2, 0x78, 0x68, 0xbb, 0xb1, 0xde, 0xf4, 0x73, + 0xfd, 0x50, 0x84, 0xe6, 0xb8, 0xfb, 0xf3, 0x6e, 0x3b, 0xda, 0x75, 0xcf, 0x26, 0x31, 0x8b, 0xad, + 0x6d, 0x8f, 0xc7, 0xd8, 0x65, 0x23, 0xf3, 0xdf, 0x01, 0x18, 0xdb, 0xec, 0xec, 0x84, 0x81, 0x7c, + 0x18, 0x69, 0x6c, 0x4a, 0x61, 0x90, 0xb9, 0x21, 0x36, 0xc8, 0x34, 0x99, 0x19, 0x71, 0xf4, 0x33, + 0x35, 0xe1, 0x7a, 0x16, 0xf8, 0x40, 0xf9, 0x06, 0xb4, 0xaf, 0x62, 0xa3, 0x53, 0x00, 0x01, 0x13, + 0xd2, 0x65, 0x1e, 0x7e, 0xbc, 0xd6, 0xb8, 0xa6, 0xdf, 0x28, 0x59, 0x94, 0x1f, 0x99, 0x8c, 0x0f, + 0x23, 0x1e, 0x30, 0xd9, 0x18, 0x4c, 0xfc, 0x85, 0x45, 0xf9, 0x23, 0x37, 0x76, 0x43, 0x94, 0x18, + 0x8b, 0xc6, 0xd0, 0xf4, 0x35, 0xe5, 0x2f, 0x2c, 0xf4, 0x2d, 0x18, 0x57, 0x85, 0x0a, 0x3c, 
0x5c, + 0xf1, 0x3c, 0xde, 0x61, 0xb2, 0x51, 0xd3, 0x39, 0x4e, 0x59, 0x15, 0x6b, 0x52, 0xd0, 0xb5, 0xf8, + 0xd0, 0xe9, 0xb0, 0xc6, 0xf0, 0x34, 0x99, 0xa9, 0x3b, 0x15, 0x1b, 0x9d, 0x80, 0x5a, 0xdb, 0xdd, + 0xc1, 0xb6, 0x68, 0xd4, 0x75, 0x8e, 0x74, 0x45, 0x3f, 0x83, 0x71, 0x7e, 0xc0, 0x30, 0x76, 0xf0, + 0x09, 0xc6, 0xc8, 0x3c, 0x6c, 0x8c, 0x4c, 0x93, 0x99, 0xd1, 0xe6, 0xa2, 0x95, 0x48, 0x69, 0x95, + 0xab, 0x68, 0x45, 0x7b, 0xbe, 0x32, 0x08, 0x4b, 0x55, 0xd1, 0xda, 0x9f, 0xb7, 0x1e, 0x56, 0x62, + 0x9d, 0x53, 0xb9, 0xcc, 0xe7, 0x03, 0xf0, 0xda, 0xe3, 0xb4, 0x6a, 0xab, 0x31, 0xba, 0x12, 0x1d, + 0xfc, 0xa2, 0x83, 0x42, 0xd2, 0x49, 0x18, 0x51, 0x3a, 0x8b, 0xc8, 0xf5, 0x32, 0xe1, 0x0b, 0x03, + 0xdd, 0x86, 0xbc, 0x05, 0xb5, 0xf2, 0xa3, 0xcd, 0x65, 0xab, 0xe8, 0x0f, 0x2b, 0xeb, 0x0f, 0xfd, + 0x50, 0x40, 0xe5, 0x5d, 0x9c, 0xf5, 0x87, 0x95, 0xed, 0xed, 0xe4, 0xe9, 0xe8, 0x36, 0x8c, 0x79, + 0x9a, 0x24, 0xad, 0xbe, 0xae, 0xdb, 0x68, 0x73, 0xa1, 0xb7, 0xf3, 0xae, 0x96, 0x43, 0x9d, 0x6a, + 0x26, 0xba, 0x0c, 0x63, 0xa2, 0xdc, 0x58, 0xba, 0xe4, 0xa3, 0xcd, 0xd7, 0x0b, 0xb0, 0x4a, 0xdf, + 0x39, 0xd5, 0xb7, 0xcd, 0x67, 0x04, 0x68, 0x06, 0xbc, 0x8e, 0x32, 0x53, 0xca, 0x84, 0xeb, 0x59, + 0xfc, 0x83, 0xa2, 0x4b, 0x2b, 0xb6, 0xaa, 0x9a, 0x03, 0xa7, 0xd5, 0x7c, 0x04, 0xe0, 0xa3, 0xac, + 0x9e, 0x77, 0xae, 0xb7, 0xf3, 0xae, 0xe7, 0x71, 0x4e, 0x29, 0x87, 0xf9, 0x0d, 0x81, 0x57, 0x32, + 0xd4, 0x8d, 0x40, 0xc8, 0xde, 0xaa, 0xba, 0x09, 0xa3, 0xed, 0x40, 0xe4, 0x20, 0x49, 0x61, 0xe7, + 0x7b, 0x03, 0xd9, 0x28, 0x02, 0x9d, 0x72, 0x16, 0xb3, 0x53, 0x74, 0xd8, 0x27, 0x51, 0xab, 0xd4, + 0x61, 0x17, 0xd7, 0xcd, 0x80, 0x7a, 0x88, 0x21, 0x0f, 0xbe, 0xc4, 0x96, 0x56, 0xad, 0xee, 0xe4, + 0x6b, 0xf3, 0x8f, 0x52, 0xb1, 0x36, 0xb8, 0x7f, 0x79, 0x9b, 0x36, 0x60, 0x38, 0xe2, 0x2d, 0x1d, + 0x9c, 0x4c, 0x94, 0x6c, 0xa9, 0xe2, 0x3c, 0xce, 0xa4, 0xab, 0x14, 0x4a, 0xa7, 0x49, 0x61, 0xa0, + 0x2b, 0x00, 0x6d, 0xee, 0x67, 0xda, 0x0e, 0x69, 0x6d, 0x6f, 0x95, 0xb4, 0xb5, 0xd4, 0x3c, 0x54, + 0x4a, 0x3e, 0xe2, 0xad, 0x8d, 0xfc, 0x45, 0xa7, 0x14, 0x64, 0xfe, 0x4a, 0x0a, 0x2d, 0xd7, 0xb0, + 0x8d, 0x97, 0xa9, 0xe5, 0x36, 0x8c, 0xb5, 0x74, 0xca, 0xff, 0xf5, 0xb3, 0x5b, 0x2b, 0x87, 0x3a, + 0xd5, 0x4c, 0xe6, 0x16, 0x4c, 0x9c, 0xa6, 0x16, 0x11, 0x67, 0x02, 0x7b, 0xc2, 0x9e, 0x80, 0x9a, + 0x90, 0xae, 0xec, 0x88, 0x94, 0x39, 0x5d, 0x99, 0x0c, 0xea, 0x1b, 0xdc, 0xff, 0x50, 0x4d, 0x6b, + 0x55, 0x13, 0x25, 0x34, 0x32, 0x99, 0xa6, 0xc8, 0x96, 0xf4, 0x23, 0x18, 0x91, 0x41, 0x88, 0x9b, + 0xd2, 0x0d, 0xa3, 0xb4, 0xa1, 0x67, 0x7b, 0x3b, 0xd2, 0x56, 0x10, 0xa2, 0x53, 0x04, 0x37, 0xff, + 0x19, 0x87, 0x97, 0xb2, 0x63, 0x6c, 0x26, 0xf3, 0x9d, 0x3e, 0x25, 0x50, 0x4b, 0x26, 0x0e, 0xbd, + 0x59, 0x0c, 0x91, 0xae, 0x03, 0xd5, 0xb8, 0xd8, 0x80, 0x34, 0x27, 0xbf, 0xfe, 0xf3, 0xef, 0xef, + 0x07, 0x26, 0xcc, 0x97, 0xf5, 0xdd, 0xb9, 0x3f, 0x9f, 0x5f, 0xb6, 0x62, 0x89, 0xcc, 0xd2, 0x1f, + 0x08, 0x5c, 0x5b, 0x47, 0x49, 0x27, 0xcf, 0x52, 0x14, 0x93, 0xea, 0xa2, 0x08, 0x8b, 0x1a, 0xc1, + 0xa2, 0xb7, 0xcf, 0x20, 0xd8, 0x47, 0x79, 0x23, 0x1d, 0xdb, 0x47, 0xe5, 0xf2, 0x1d, 0xd3, 0xef, + 0x08, 0x0c, 0xaa, 0xe1, 0x40, 0x6f, 0x9c, 0x65, 0x2b, 0x8d, 0x26, 0x63, 0xe5, 0x42, 0x70, 0x2a, + 0x93, 0xf9, 0xa6, 0x06, 0xbc, 0x49, 0x6f, 0xbc, 0x10, 0x90, 0x7e, 0x45, 0xa0, 0x96, 0x34, 0x62, + 0xb7, 0xaa, 0x55, 0x7e, 0x58, 0xc6, 0xf4, 0xf9, 0x2f, 0x24, 0x3d, 0x9c, 0xa9, 0x32, 0xdb, 0x9f, + 0x2a, 0x3f, 0x11, 0x18, 0x72, 0x50, 0xf5, 0x6e, 0x17, 0x84, 0xca, 0x9c, 0xbc, 0x68, 0xd5, 0x96, + 0x35, 0xdf, 0x3b, 0x46, 0xb3, 0x1f, 0x3e, 0x3b, 0x56, 0x6c, 0xaa, 0xb3, 0x7e, 0x26, 0x50, 0x77, + 0x30, 0xb9, 0x08, 
0xaf, 0x9c, 0xf5, 0x7d, 0xcd, 0xba, 0x64, 0xdc, 0xe9, 0x93, 0x35, 0xc1, 0x53, + 0xb8, 0xcf, 0x08, 0xd4, 0x14, 0x6e, 0x88, 0x57, 0x0e, 0xfb, 0x9e, 0x86, 0xbd, 0x6b, 0x2c, 0xf4, + 0x0d, 0x1b, 0xa2, 0x42, 0x7d, 0x4e, 0x60, 0x78, 0xb3, 0x23, 0x22, 0x64, 0xad, 0x2b, 0x67, 0xbd, + 0xaf, 0x59, 0xdf, 0x35, 0x16, 0xfb, 0x62, 0x15, 0x09, 0x9d, 0x82, 0xfd, 0x85, 0xc0, 0xc8, 0x16, + 0xc6, 0x61, 0xc0, 0xce, 0x19, 0x76, 0x97, 0x8a, 0xbb, 0xa2, 0x71, 0xef, 0x19, 0x6f, 0xf7, 0x85, + 0x2b, 0x33, 0x3e, 0x05, 0xfc, 0xad, 0x1e, 0x3b, 0x4c, 0x5e, 0xf9, 0x60, 0xbe, 0xa5, 0x59, 0xdf, + 0x30, 0x27, 0xce, 0xb2, 0xb6, 0x03, 0xa6, 0x9b, 0xf2, 0x29, 0x81, 0xe1, 0xe4, 0x56, 0x17, 0xdd, + 0x26, 0x74, 0xf1, 0xf7, 0xc4, 0xa0, 0x85, 0x37, 0xbb, 0xdc, 0xcc, 0x75, 0xbd, 0xc1, 0x0a, 0xbd, + 0x7f, 0x7a, 0x83, 0x17, 0x69, 0x11, 0xf1, 0x96, 0xb0, 0x8f, 0xd2, 0x7f, 0x26, 0xc7, 0x76, 0x9b, + 0xfb, 0x62, 0x8e, 0xd0, 0x1f, 0x09, 0x0c, 0x3d, 0x76, 0xa5, 0xb7, 0x7b, 0xb5, 0x17, 0xc5, 0x3d, + 0x4d, 0x7c, 0x87, 0xe6, 0xbf, 0x0c, 0x21, 0x63, 0x74, 0xc3, 0x9e, 0xaa, 0x38, 0x47, 0x3e, 0x58, + 0xfa, 0xed, 0x64, 0x8a, 0xfc, 0x7e, 0x32, 0x45, 0xfe, 0x3a, 0x99, 0x22, 0x9f, 0xde, 0x3e, 0xf7, + 0xbb, 0xb3, 0xcb, 0x87, 0xf2, 0x4e, 0x4d, 0x7f, 0x43, 0x2e, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, + 0xa3, 0x38, 0x06, 0xae, 0x46, 0x0f, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/cmd/server/workflow/workflow.pb.gw.go b/cmd/server/workflow/workflow.pb.gw.go index 715811bcd51b..a3cb10e630ac 100644 --- a/cmd/server/workflow/workflow.pb.gw.go +++ b/cmd/server/workflow/workflow.pb.gw.go @@ -63,7 +63,7 @@ func local_request_WorkflowService_Create_0(ctx context.Context, marshaler runti } var ( - filter_WorkflowService_Get_0 = &utilities.DoubleArray{Encoding: map[string]int{"Namespace": 0, "WorkflowName": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} + filter_WorkflowService_Get_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "workflowName": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} ) func request_WorkflowService_Get_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -77,26 +77,26 @@ func request_WorkflowService_Get_0(ctx context.Context, marshaler runtime.Marsha _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", 
"workflowName", err) } if err := req.ParseForm(); err != nil { @@ -122,26 +122,26 @@ func local_request_WorkflowService_Get_0(ctx context.Context, marshaler runtime. _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WorkflowService_Get_0); err != nil { @@ -154,7 +154,7 @@ func local_request_WorkflowService_Get_0(ctx context.Context, marshaler runtime. } var ( - filter_WorkflowService_List_0 = &utilities.DoubleArray{Encoding: map[string]int{"Namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} + filter_WorkflowService_List_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} ) func request_WorkflowService_List_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -168,15 +168,15 @@ func request_WorkflowService_List_0(ctx context.Context, marshaler runtime.Marsh _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } if err := req.ParseForm(); err != nil { @@ -202,15 +202,15 @@ func local_request_WorkflowService_List_0(ctx context.Context, marshaler runtime _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } if err := 
runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WorkflowService_List_0); err != nil { @@ -223,7 +223,7 @@ func local_request_WorkflowService_List_0(ctx context.Context, marshaler runtime } var ( - filter_WorkflowService_Delete_0 = &utilities.DoubleArray{Encoding: map[string]int{"Namespace": 0, "WorkflowName": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} + filter_WorkflowService_Delete_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "workflowName": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} ) func request_WorkflowService_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -237,26 +237,26 @@ func request_WorkflowService_Delete_0(ctx context.Context, marshaler runtime.Mar _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } if err := req.ParseForm(); err != nil { @@ -282,26 +282,26 @@ func local_request_WorkflowService_Delete_0(ctx context.Context, marshaler runti _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WorkflowService_Delete_0); err != 
nil { @@ -332,26 +332,26 @@ func request_WorkflowService_Retry_0(ctx context.Context, marshaler runtime.Mars _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } msg, err := client.Retry(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -378,26 +378,26 @@ func local_request_WorkflowService_Retry_0(ctx context.Context, marshaler runtim _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } msg, err := server.Retry(ctx, &protoReq) @@ -424,26 +424,26 @@ func request_WorkflowService_Resubmit_0(ctx context.Context, marshaler runtime.M _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if 
!ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } msg, err := client.Resubmit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -470,26 +470,26 @@ func local_request_WorkflowService_Resubmit_0(ctx context.Context, marshaler run _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } msg, err := server.Resubmit(ctx, &protoReq) @@ -516,26 +516,26 @@ func request_WorkflowService_Resume_0(ctx context.Context, marshaler runtime.Mar _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } msg, err := client.Resume(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -562,26 +562,26 @@ func local_request_WorkflowService_Resume_0(ctx context.Context, marshaler runti _ = err ) - val, ok = 
pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } msg, err := server.Resume(ctx, &protoReq) @@ -608,26 +608,26 @@ func request_WorkflowService_Suspend_0(ctx context.Context, marshaler runtime.Ma _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } msg, err := client.Suspend(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -654,26 +654,26 @@ func local_request_WorkflowService_Suspend_0(ctx context.Context, marshaler runt _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } msg, err := server.Suspend(ctx, &protoReq) @@ -700,26 +700,26 @@ func request_WorkflowService_Terminate_0(ctx context.Context, marshaler runtime. _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } msg, err := client.Terminate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) @@ -746,26 +746,26 @@ func local_request_WorkflowService_Terminate_0(ctx context.Context, marshaler ru _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } msg, err := server.Terminate(ctx, &protoReq) @@ -808,7 +808,7 @@ func local_request_WorkflowService_Lint_0(ctx context.Context, marshaler runtime } var ( - filter_WorkflowService_PodLogs_0 = &utilities.DoubleArray{Encoding: map[string]int{"Namespace": 0, "WorkflowName": 1, "PodName": 2}, Base: []int{1, 1, 2, 3, 0, 0, 0}, Check: []int{0, 1, 1, 1, 2, 3, 4}} + 
filter_WorkflowService_PodLogs_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "workflowName": 1, "podName": 2}, Base: []int{1, 1, 2, 3, 0, 0, 0}, Check: []int{0, 1, 1, 1, 2, 3, 4}} ) func request_WorkflowService_PodLogs_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (WorkflowService_PodLogsClient, runtime.ServerMetadata, error) { @@ -822,37 +822,37 @@ func request_WorkflowService_PodLogs_0(ctx context.Context, marshaler runtime.Ma _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } - val, ok = pathParams["PodName"] + val, ok = pathParams["podName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "PodName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "podName") } protoReq.PodName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "PodName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "podName", err) } if err := req.ParseForm(); err != nil { @@ -876,7 +876,7 @@ func request_WorkflowService_PodLogs_0(ctx context.Context, marshaler runtime.Ma } var ( - filter_WorkflowService_Watch_0 = &utilities.DoubleArray{Encoding: map[string]int{"Namespace": 0, "WorkflowName": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} + filter_WorkflowService_Watch_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "workflowName": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} ) func request_WorkflowService_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowServiceClient, req *http.Request, pathParams map[string]string) (WorkflowService_WatchClient, runtime.ServerMetadata, error) { @@ -890,26 +890,26 @@ func request_WorkflowService_Watch_0(ctx context.Context, marshaler runtime.Mars _ = err ) - val, ok = pathParams["Namespace"] + val, ok = pathParams["namespace"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "Namespace") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") } protoReq.Namespace, err = runtime.String(val) if err != nil { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "Namespace", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) } - val, ok = pathParams["WorkflowName"] + val, ok = pathParams["workflowName"] if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "WorkflowName") + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "workflowName") } protoReq.WorkflowName, err = runtime.String(val) if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "WorkflowName", err) + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "workflowName", err) } if err := req.ParseForm(); err != nil { @@ -1438,27 +1438,27 @@ func RegisterWorkflowServiceHandlerClient(ctx context.Context, mux *runtime.Serv var ( pattern_WorkflowService_Create_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "workflows"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "namespace", "workflowName"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_List_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "Namespace"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_List_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflows", "namespace"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflows", "namespace", "workflowName"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Retry_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName", "retry"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Retry_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "namespace", "workflowName", "retry"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Resubmit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName", "resubmit"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Resubmit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "namespace", "workflowName", "resubmit"}, "", 
runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Resume_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName", "resume"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Resume_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "namespace", "workflowName", "resume"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Suspend_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName", "suspend"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Suspend_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "namespace", "workflowName", "suspend"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Terminate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "Namespace", "WorkflowName", "terminate"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Terminate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "namespace", "workflowName", "terminate"}, "", runtime.AssumeColonVerbOpt(true))) pattern_WorkflowService_Lint_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "workflows", "lint"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_PodLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6, 2, 7}, []string{"api", "v1", "workflow", "Namespace", "WorkflowName", "pods", "PodName", "logs"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_PodLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6, 2, 7}, []string{"api", "v1", "workflow", "namespace", "workflowName", "pods", "podName", "logs"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "stream", "workflows", "Namespace", "WorkflowName"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "stream", "workflows", "namespace", "workflowName"}, "", runtime.AssumeColonVerbOpt(true))) ) var ( diff --git a/cmd/server/workflow/workflow.proto b/cmd/server/workflow/workflow.proto index 46c0ae0de491..715ef07735b9 100644 --- a/cmd/server/workflow/workflow.proto +++ b/cmd/server/workflow/workflow.proto @@ -14,58 +14,58 @@ import "k8s.io/api/core/v1/generated.proto"; package workflow; message SubmitOptions { - string Name = 1; - string GenerateName = 2; - string InstanceID = 3; - string Entrypoint = 4; - repeated string Parameters = 5; - string ServiceAccount = 6; - bool ServerDryRun = 7; - string Labels = 8; - k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference OwnerReference = 9; + string name = 1; + string 
generateName = 2; + string instanceID = 3; + string entrypoint = 4; + repeated string parameters = 5; + string serviceAccount = 6; + bool serverDryRun = 7; + string labels = 8; + k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference ownerReference = 9; } message WorkflowCreateRequest { - string Namespace = 1; - github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow Workflow = 2; - k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions CreateOptions = 3; - SubmitOptions SubmitOptions = 4; + string namespace = 1; + github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow workflow = 2; + k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions createOptions = 3; + SubmitOptions submitOptions = 4; } message WorkflowGetRequest { - string WorkflowName = 1; - string Namespace = 2; - k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions GetOptions = 3; + string workflowName = 1; + string namespace = 2; + k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions getOptions = 3; } message WorkflowListRequest { - string Namespace = 1; - k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions ListOptions = 2; + string namespace = 1; + k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions listOptions = 2; } message WorkflowUpdateRequest { - string WorkflowName = 1; - string Namespace = 2; - bool Memoized = 3; + string workflowName = 1; + string namespace = 2; + bool memoized = 3; } message WorkflowLogRequest { - string WorkflowName = 1; - string Namespace = 2; - string PodName = 3; - string Container = 4; + string workflowName = 1; + string namespace = 2; + string podName = 3; + string container = 4; k8s.io.api.core.v1.PodLogOptions logOptions = 5; } message WorkflowDeleteRequest { - string WorkflowName = 1; - string Namespace = 2; - k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions DeleteOptions = 3; + string workflowName = 1; + string namespace = 2; + k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 3; } message WorkflowDeleteResponse { - string WorkflowName = 1; - string Status = 2; + string workflowName = 1; + string status = 2; } message LogEntry { @@ -82,48 +82,48 @@ service WorkflowService { } rpc Get (WorkflowGetRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { - option (google.api.http).get = "/api/v1/workflows/{Namespace}/{WorkflowName}"; + option (google.api.http).get = "/api/v1/workflows/{namespace}/{workflowName}"; } rpc List (WorkflowListRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowList) { - option (google.api.http).get = "/api/v1/workflows/{Namespace}"; + option (google.api.http).get = "/api/v1/workflows/{namespace}"; } rpc Delete (WorkflowDeleteRequest) returns (WorkflowDeleteResponse) { - option (google.api.http).delete = "/api/v1/workflows/{Namespace}/{WorkflowName}"; + option (google.api.http).delete = "/api/v1/workflows/{namespace}/{workflowName}"; } rpc Retry (WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http) = { - put: "/api/v1/workflows/{Namespace}/{WorkflowName}/retry" + put: "/api/v1/workflows/{namespace}/{workflowName}/retry" body: "*" }; } rpc Resubmit (WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http) = { - put: "/api/v1/workflows/{Namespace}/{WorkflowName}/resubmit" + put: "/api/v1/workflows/{namespace}/{workflowName}/resubmit" body: "*" }; } rpc Resume (WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http) = { - 
put: "/api/v1/workflows/{Namespace}/{WorkflowName}/resume" + put: "/api/v1/workflows/{namespace}/{workflowName}/resume" body: "*" }; } rpc Suspend (WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http) = { - put: "/api/v1/workflows/{Namespace}/{WorkflowName}/suspend" + put: "/api/v1/workflows/{namespace}/{workflowName}/suspend" body: "*" }; } rpc Terminate (WorkflowUpdateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http) = { - put: "/api/v1/workflows/{Namespace}/{WorkflowName}/terminate" + put: "/api/v1/workflows/{namespace}/{workflowName}/terminate" body: "*" }; } @@ -137,11 +137,11 @@ service WorkflowService { // PodLogs returns stream of log entries for the specified pod. Pod rpc PodLogs (WorkflowLogRequest) returns (stream LogEntry) { - option (google.api.http).get = "/api/v1/workflow/{Namespace}/{WorkflowName}/pods/{PodName}/logs"; + option (google.api.http).get = "/api/v1/workflow/{namespace}/{workflowName}/pods/{podName}/logs"; } // Watch returns stream of application change events. rpc Watch (WorkflowGetRequest) returns (stream github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { - option (google.api.http).get = "/api/v1/stream/workflows/{Namespace}/{WorkflowName}"; + option (google.api.http).get = "/api/v1/stream/workflows/{namespace}/{workflowName}"; } } \ No newline at end of file diff --git a/cmd/server/workflow/workflow.swagger.json b/cmd/server/workflow/workflow.swagger.json index a262090e7df9..3f6df0d8a6ed 100644 --- a/cmd/server/workflow/workflow.swagger.json +++ b/cmd/server/workflow/workflow.swagger.json @@ -16,7 +16,7 @@ "application/json" ], "paths": { - "/api/v1/stream/workflows/{Namespace}/{WorkflowName}": { + "/api/v1/stream/workflows/{namespace}/{workflowName}": { "get": { "summary": "Watch returns stream of application change events.", "operationId": "Watch", @@ -30,19 +30,19 @@ }, "parameters": [ { - "name": "Namespace", + "name": "namespace", "in": "path", "required": true, "type": "string" }, { - "name": "WorkflowName", + "name": "workflowName", "in": "path", "required": true, "type": "string" }, { - "name": "GetOptions.resourceVersion", + "name": "getOptions.resourceVersion", "description": "When specified:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.", "in": "query", "required": false, @@ -54,7 +54,7 @@ ] } }, - "/api/v1/workflow/{Namespace}/{WorkflowName}/pods/{PodName}/logs": { + "/api/v1/workflow/{namespace}/{workflowName}/pods/{podName}/logs": { "get": { "summary": "PodLogs returns stream of log entries for the specified pod. 
Pod", "operationId": "PodLogs", @@ -68,25 +68,25 @@ }, "parameters": [ { - "name": "Namespace", + "name": "namespace", "in": "path", "required": true, "type": "string" }, { - "name": "WorkflowName", + "name": "workflowName", "in": "path", "required": true, "type": "string" }, { - "name": "PodName", + "name": "podName", "in": "path", "required": true, "type": "string" }, { - "name": "Container", + "name": "container", "in": "query", "required": false, "type": "string" @@ -220,7 +220,7 @@ ] } }, - "/api/v1/workflows/{Namespace}": { + "/api/v1/workflows/{namespace}": { "get": { "operationId": "List", "responses": { @@ -233,27 +233,27 @@ }, "parameters": [ { - "name": "Namespace", + "name": "namespace", "in": "path", "required": true, "type": "string" }, { - "name": "ListOptions.labelSelector", + "name": "listOptions.labelSelector", "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.", "in": "query", "required": false, "type": "string" }, { - "name": "ListOptions.fieldSelector", + "name": "listOptions.fieldSelector", "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.", "in": "query", "required": false, "type": "string" }, { - "name": "ListOptions.watch", + "name": "listOptions.watch", "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional.", "in": "query", "required": false, @@ -261,7 +261,7 @@ "format": "boolean" }, { - "name": "ListOptions.allowWatchBookmarks", + "name": "listOptions.allowWatchBookmarks", "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\nIf the feature gate WatchBookmarks is not enabled in apiserver,\nthis field is ignored.\n\nThis field is beta.\n\n+optional", "in": "query", "required": false, @@ -269,14 +269,14 @@ "format": "boolean" }, { - "name": "ListOptions.resourceVersion", + "name": "listOptions.resourceVersion", "description": "When specified with a watch call, shows changes that occur after that particular version of a resource.\nDefaults to changes from the beginning of history.\nWhen specified for list:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.\n+optional.", "in": "query", "required": false, "type": "string" }, { - "name": "ListOptions.timeoutSeconds", + "name": "listOptions.timeoutSeconds", "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.", "in": "query", "required": false, @@ -284,7 +284,7 @@ "format": "int64" }, { - "name": "ListOptions.limit", + "name": "listOptions.limit", "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. 
Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.", "in": "query", "required": false, @@ -292,7 +292,7 @@ "format": "int64" }, { - "name": "ListOptions.continue", + "name": "listOptions.continue", "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", "in": "query", "required": false, @@ -304,7 +304,7 @@ ] } }, - "/api/v1/workflows/{Namespace}/{WorkflowName}": { + "/api/v1/workflows/{namespace}/{workflowName}": { "get": { "operationId": "Get", "responses": { @@ -317,19 +317,19 @@ }, "parameters": [ { - "name": "Namespace", + "name": "namespace", "in": "path", "required": true, "type": "string" }, { - "name": "WorkflowName", + "name": "workflowName", "in": "path", "required": true, "type": "string" }, { - "name": "GetOptions.resourceVersion", + "name": "getOptions.resourceVersion", "description": "When specified:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.", "in": "query", "required": false, @@ -352,19 +352,19 @@ }, "parameters": [ { - "name": "Namespace", + "name": "namespace", "in": "path", "required": true, "type": "string" }, { - "name": "WorkflowName", + "name": "workflowName", "in": "path", "required": true, "type": "string" }, { - "name": "DeleteOptions.gracePeriodSeconds", + "name": "deleteOptions.gracePeriodSeconds", "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. zero means delete immediately.\n+optional.", "in": "query", "required": false, @@ -372,21 +372,21 @@ "format": "int64" }, { - "name": "DeleteOptions.preconditions.uid", + "name": "deleteOptions.preconditions.uid", "description": "Specifies the target UID.\n+optional.", "in": "query", "required": false, "type": "string" }, { - "name": "DeleteOptions.preconditions.resourceVersion", + "name": "deleteOptions.preconditions.resourceVersion", "description": "Specifies the target ResourceVersion\n+optional.", "in": "query", "required": false, "type": "string" }, { - "name": "DeleteOptions.orphanDependents", + "name": "deleteOptions.orphanDependents", "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional.", "in": "query", "required": false, @@ -394,14 +394,14 @@ "format": "boolean" }, { - "name": "DeleteOptions.propagationPolicy", + "name": "deleteOptions.propagationPolicy", "description": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional.", "in": "query", "required": false, "type": "string" }, { - "name": "DeleteOptions.dryRun", + "name": "deleteOptions.dryRun", "description": "When present, indicates that modifications should not be\npersisted. 
An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional.", "in": "query", "required": false, @@ -417,7 +417,7 @@ ] } }, - "/api/v1/workflows/{Namespace}/{WorkflowName}/resubmit": { + "/api/v1/workflows/{namespace}/{workflowName}/resubmit": { "put": { "operationId": "Resubmit", "responses": { @@ -430,13 +430,13 @@ }, "parameters": [ { - "name": "Namespace", + "name": "namespace", "in": "path", "required": true, "type": "string" }, { - "name": "WorkflowName", + "name": "workflowName", "in": "path", "required": true, "type": "string" @@ -455,7 +455,7 @@ ] } }, - "/api/v1/workflows/{Namespace}/{WorkflowName}/resume": { + "/api/v1/workflows/{namespace}/{workflowName}/resume": { "put": { "operationId": "Resume", "responses": { @@ -468,13 +468,13 @@ }, "parameters": [ { - "name": "Namespace", + "name": "namespace", "in": "path", "required": true, "type": "string" }, { - "name": "WorkflowName", + "name": "workflowName", "in": "path", "required": true, "type": "string" @@ -493,7 +493,7 @@ ] } }, - "/api/v1/workflows/{Namespace}/{WorkflowName}/retry": { + "/api/v1/workflows/{namespace}/{workflowName}/retry": { "put": { "operationId": "Retry", "responses": { @@ -506,13 +506,13 @@ }, "parameters": [ { - "name": "Namespace", + "name": "namespace", "in": "path", "required": true, "type": "string" }, { - "name": "WorkflowName", + "name": "workflowName", "in": "path", "required": true, "type": "string" @@ -531,7 +531,7 @@ ] } }, - "/api/v1/workflows/{Namespace}/{WorkflowName}/suspend": { + "/api/v1/workflows/{namespace}/{workflowName}/suspend": { "put": { "operationId": "Suspend", "responses": { @@ -544,13 +544,13 @@ }, "parameters": [ { - "name": "Namespace", + "name": "namespace", "in": "path", "required": true, "type": "string" }, { - "name": "WorkflowName", + "name": "workflowName", "in": "path", "required": true, "type": "string" @@ -569,7 +569,7 @@ ] } }, - "/api/v1/workflows/{Namespace}/{WorkflowName}/terminate": { + "/api/v1/workflows/{namespace}/{workflowName}/terminate": { "put": { "operationId": "Terminate", "responses": { @@ -582,13 +582,13 @@ }, "parameters": [ { - "name": "Namespace", + "name": "namespace", "in": "path", "required": true, "type": "string" }, { - "name": "WorkflowName", + "name": "workflowName", "in": "path", "required": true, "type": "string" @@ -4338,35 +4338,35 @@ "workflowSubmitOptions": { "type": "object", "properties": { - "Name": { + "name": { "type": "string" }, - "GenerateName": { + "generateName": { "type": "string" }, - "InstanceID": { + "instanceID": { "type": "string" }, - "Entrypoint": { + "entrypoint": { "type": "string" }, - "Parameters": { + "parameters": { "type": "array", "items": { "type": "string" } }, - "ServiceAccount": { + "serviceAccount": { "type": "string" }, - "ServerDryRun": { + "serverDryRun": { "type": "boolean", "format": "boolean" }, - "Labels": { + "labels": { "type": "string" }, - "OwnerReference": { + "ownerReference": { "$ref": "#/definitions/v1OwnerReference" } } @@ -4374,16 +4374,16 @@ "workflowWorkflowCreateRequest": { "type": "object", "properties": { - "Namespace": { + "namespace": { "type": "string" }, - "Workflow": { + "workflow": { "$ref": "#/definitions/v1alpha1Workflow" }, - "CreateOptions": { + "createOptions": { "$ref": "#/definitions/v1CreateOptions" }, - "SubmitOptions": { + "submitOptions": { "$ref": "#/definitions/workflowSubmitOptions" } } @@ -4391,10 +4391,10 @@ 
"workflowWorkflowDeleteResponse": { "type": "object", "properties": { - "WorkflowName": { + "workflowName": { "type": "string" }, - "Status": { + "status": { "type": "string" } } @@ -4402,13 +4402,13 @@ "workflowWorkflowUpdateRequest": { "type": "object", "properties": { - "WorkflowName": { + "workflowName": { "type": "string" }, - "Namespace": { + "namespace": { "type": "string" }, - "Memoized": { + "memoized": { "type": "boolean", "format": "boolean" } diff --git a/cmd/server/workflow/workflow_server.go b/cmd/server/workflow/workflow_server.go index c86716ef06fa..05c67844c349 100644 --- a/cmd/server/workflow/workflow_server.go +++ b/cmd/server/workflow/workflow_server.go @@ -15,6 +15,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + apisrvcmn "github.com/argoproj/argo/cmd/server/common" "github.com/argoproj/argo/persist/sqldb" "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo/pkg/client/clientset/versioned" @@ -72,13 +73,13 @@ func (s *WorkflowServer) GetWFClient(ctx context.Context) (*versioned.Clientset, } var restConfigStr, bearerToken string - if len(md.Get(CLIENT_REST_CONFIG)) == 0 { + if len(md.Get(apisrvcmn.CLIENT_REST_CONFIG)) == 0 { return nil, nil, errors.New("Client kubeconfig is not found") } - restConfigStr = md.Get(CLIENT_REST_CONFIG)[0] + restConfigStr = md.Get(apisrvcmn.CLIENT_REST_CONFIG)[0] - if len(md.Get(AUTH_TOKEN)) > 0 { - bearerToken = md.Get(AUTH_TOKEN)[0] + if len(md.Get(apisrvcmn.AUTH_TOKEN)) > 0 { + bearerToken = md.Get(apisrvcmn.AUTH_TOKEN)[0] } restConfig := rest.Config{} diff --git a/cmd/server/workflow/workflow_service.go b/cmd/server/workflow/workflow_service.go index 2d7e20502791..09a26a58457c 100644 --- a/cmd/server/workflow/workflow_service.go +++ b/cmd/server/workflow/workflow_service.go @@ -3,6 +3,7 @@ package workflow import ( "encoding/json" "errors" + "github.com/argoproj/argo/cmd/server/common" "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo/pkg/client/clientset/versioned" wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" @@ -39,13 +40,13 @@ func (s *KubeService) GetWFClient(ctx context.Context) (*versioned.Clientset, *k } var restConfigStr, bearerToken string - if len(md.Get(CLIENT_REST_CONFIG)) == 0 { + if len(md.Get(common.CLIENT_REST_CONFIG)) == 0 { return nil, nil, errors.New("Client kubeconfig is not found") } - restConfigStr = md.Get(CLIENT_REST_CONFIG)[0] + restConfigStr = md.Get(common.CLIENT_REST_CONFIG)[0] - if len(md.Get(AUTH_TOKEN)) > 0 { - bearerToken = md.Get(AUTH_TOKEN)[0] + if len(md.Get(common.AUTH_TOKEN)) > 0 { + bearerToken = md.Get(common.AUTH_TOKEN)[0] } restConfig := rest.Config{} diff --git a/cmd/server/workflowtemplate/workflow-template.pb.go b/cmd/server/workflowtemplate/workflow-template.pb.go new file mode 100644 index 000000000000..6f266dcc91c6 --- /dev/null +++ b/cmd/server/workflowtemplate/workflow-template.pb.go @@ -0,0 +1,2327 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: cmd/server/workflowtemplate/workflow-template.proto + +// Workflow Service +// +// Workflow Service API performs CRUD actions against application resources + +package workflowtemplate + +import ( + context "context" + fmt "fmt" + v1alpha1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + _ "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type SubmitOptions struct { + Strict bool `protobuf:"varint,1,opt,name=Strict,proto3" json:"Strict,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SubmitOptions) Reset() { *m = SubmitOptions{} } +func (m *SubmitOptions) String() string { return proto.CompactTextString(m) } +func (*SubmitOptions) ProtoMessage() {} +func (*SubmitOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_ffa03b43b9ae30de, []int{0} +} +func (m *SubmitOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubmitOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SubmitOptions.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SubmitOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubmitOptions.Merge(m, src) +} +func (m *SubmitOptions) XXX_Size() int { + return m.Size() +} +func (m *SubmitOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SubmitOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SubmitOptions proto.InternalMessageInfo + +func (m *SubmitOptions) GetStrict() bool { + if m != nil { + return m.Strict + } + return false +} + +type WorkflowTemplateCreateRequest struct { + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Template *v1alpha1.WorkflowTemplate `protobuf:"bytes,2,opt,name=template,proto3" json:"template,omitempty"` + CreateOptions *v1.CreateOptions `protobuf:"bytes,3,opt,name=createOptions,proto3" json:"createOptions,omitempty"` + SubmitOptions *SubmitOptions `protobuf:"bytes,4,opt,name=submitOptions,proto3" json:"submitOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowTemplateCreateRequest) Reset() { *m = WorkflowTemplateCreateRequest{} } +func (m *WorkflowTemplateCreateRequest) String() string { return proto.CompactTextString(m) } +func (*WorkflowTemplateCreateRequest) ProtoMessage() {} +func (*WorkflowTemplateCreateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ffa03b43b9ae30de, []int{1} +} +func (m *WorkflowTemplateCreateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
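The XXX_Marshal method that follows (and its siblings on every message in this file) branches on the deterministic flag: deterministic encoding goes through the reflection-based xxx_messageInfo marshaler, while the fast path uses the hand-written MarshalToSizedBuffer defined later in the file, which fills the buffer back to front. A sketch of opting into the deterministic branch through gogo's proto.Buffer, assuming this generated package is importable at github.com/argoproj/argo/cmd/server/workflowtemplate:

package main

import (
	"fmt"
	"log"

	"github.com/gogo/protobuf/proto"

	workflowtemplate "github.com/argoproj/argo/cmd/server/workflowtemplate"
)

func main() {
	// SetDeterministic(true) makes Buffer.Marshal call XXX_Marshal with
	// deterministic=true, yielding byte-for-byte reproducible output
	// (notably for map fields).
	buf := proto.NewBuffer(nil)
	buf.SetDeterministic(true)
	req := &workflowtemplate.WorkflowTemplateCreateRequest{Namespace: "argo"}
	if err := buf.Marshal(req); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("encoded %d bytes\n", len(buf.Bytes()))
}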
+func (m *WorkflowTemplateCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkflowTemplateCreateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkflowTemplateCreateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTemplateCreateRequest.Merge(m, src) +} +func (m *WorkflowTemplateCreateRequest) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTemplateCreateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTemplateCreateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTemplateCreateRequest proto.InternalMessageInfo + +func (m *WorkflowTemplateCreateRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *WorkflowTemplateCreateRequest) GetTemplate() *v1alpha1.WorkflowTemplate { + if m != nil { + return m.Template + } + return nil +} + +func (m *WorkflowTemplateCreateRequest) GetCreateOptions() *v1.CreateOptions { + if m != nil { + return m.CreateOptions + } + return nil +} + +func (m *WorkflowTemplateCreateRequest) GetSubmitOptions() *SubmitOptions { + if m != nil { + return m.SubmitOptions + } + return nil +} + +type WorkflowTemplateGetRequest struct { + TemplateName string `protobuf:"bytes,1,opt,name=templateName,proto3" json:"templateName,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + GetOptions *v1.GetOptions `protobuf:"bytes,3,opt,name=getOptions,proto3" json:"getOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowTemplateGetRequest) Reset() { *m = WorkflowTemplateGetRequest{} } +func (m *WorkflowTemplateGetRequest) String() string { return proto.CompactTextString(m) } +func (*WorkflowTemplateGetRequest) ProtoMessage() {} +func (*WorkflowTemplateGetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ffa03b43b9ae30de, []int{2} +} +func (m *WorkflowTemplateGetRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTemplateGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkflowTemplateGetRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkflowTemplateGetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTemplateGetRequest.Merge(m, src) +} +func (m *WorkflowTemplateGetRequest) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTemplateGetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTemplateGetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTemplateGetRequest proto.InternalMessageInfo + +func (m *WorkflowTemplateGetRequest) GetTemplateName() string { + if m != nil { + return m.TemplateName + } + return "" +} + +func (m *WorkflowTemplateGetRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *WorkflowTemplateGetRequest) GetGetOptions() *v1.GetOptions { + if m != nil { + return m.GetOptions + } + return nil +} + +type WorkflowTemplateListRequest struct { + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + ListOptions *v1.ListOptions 
`protobuf:"bytes,2,opt,name=listOptions,proto3" json:"listOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowTemplateListRequest) Reset() { *m = WorkflowTemplateListRequest{} } +func (m *WorkflowTemplateListRequest) String() string { return proto.CompactTextString(m) } +func (*WorkflowTemplateListRequest) ProtoMessage() {} +func (*WorkflowTemplateListRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ffa03b43b9ae30de, []int{3} +} +func (m *WorkflowTemplateListRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTemplateListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkflowTemplateListRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkflowTemplateListRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTemplateListRequest.Merge(m, src) +} +func (m *WorkflowTemplateListRequest) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTemplateListRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTemplateListRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTemplateListRequest proto.InternalMessageInfo + +func (m *WorkflowTemplateListRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *WorkflowTemplateListRequest) GetListOptions() *v1.ListOptions { + if m != nil { + return m.ListOptions + } + return nil +} + +type WorkflowTemplateUpdateRequest struct { + TemplateName string `protobuf:"bytes,1,opt,name=templateName,proto3" json:"templateName,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Memoized bool `protobuf:"varint,3,opt,name=memoized,proto3" json:"memoized,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowTemplateUpdateRequest) Reset() { *m = WorkflowTemplateUpdateRequest{} } +func (m *WorkflowTemplateUpdateRequest) String() string { return proto.CompactTextString(m) } +func (*WorkflowTemplateUpdateRequest) ProtoMessage() {} +func (*WorkflowTemplateUpdateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ffa03b43b9ae30de, []int{4} +} +func (m *WorkflowTemplateUpdateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTemplateUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkflowTemplateUpdateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkflowTemplateUpdateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTemplateUpdateRequest.Merge(m, src) +} +func (m *WorkflowTemplateUpdateRequest) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTemplateUpdateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTemplateUpdateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTemplateUpdateRequest proto.InternalMessageInfo + +func (m *WorkflowTemplateUpdateRequest) GetTemplateName() string { + if m != nil { + return m.TemplateName + } + return "" +} + +func (m *WorkflowTemplateUpdateRequest) GetNamespace() string { + if m != nil { + 
return m.Namespace + } + return "" +} + +func (m *WorkflowTemplateUpdateRequest) GetMemoized() bool { + if m != nil { + return m.Memoized + } + return false +} + +type WorkflowTemplateDeleteRequest struct { + TemplateName string `protobuf:"bytes,1,opt,name=templateName,proto3" json:"templateName,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + DeleteOptions *v1.DeleteOptions `protobuf:"bytes,3,opt,name=deleteOptions,proto3" json:"deleteOptions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowTemplateDeleteRequest) Reset() { *m = WorkflowTemplateDeleteRequest{} } +func (m *WorkflowTemplateDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*WorkflowTemplateDeleteRequest) ProtoMessage() {} +func (*WorkflowTemplateDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ffa03b43b9ae30de, []int{5} +} +func (m *WorkflowTemplateDeleteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowTemplateDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkflowTemplateDeleteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkflowTemplateDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowTemplateDeleteRequest.Merge(m, src) +} +func (m *WorkflowTemplateDeleteRequest) XXX_Size() int { + return m.Size() +} +func (m *WorkflowTemplateDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowTemplateDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowTemplateDeleteRequest proto.InternalMessageInfo + +func (m *WorkflowTemplateDeleteRequest) GetTemplateName() string { + if m != nil { + return m.TemplateName + } + return "" +} + +func (m *WorkflowTemplateDeleteRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *WorkflowTemplateDeleteRequest) GetDeleteOptions() *v1.DeleteOptions { + if m != nil { + return m.DeleteOptions + } + return nil +} + +type WorkflowDeleteResponse struct { + TemplateName string `protobuf:"bytes,1,opt,name=templateName,proto3" json:"templateName,omitempty"` + Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WorkflowDeleteResponse) Reset() { *m = WorkflowDeleteResponse{} } +func (m *WorkflowDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*WorkflowDeleteResponse) ProtoMessage() {} +func (*WorkflowDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ffa03b43b9ae30de, []int{6} +} +func (m *WorkflowDeleteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WorkflowDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WorkflowDeleteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WorkflowDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WorkflowDeleteResponse.Merge(m, src) +} +func (m *WorkflowDeleteResponse) XXX_Size() int { + return m.Size() +} 
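The XXX_unrecognized slice carried by each of these structs is what keeps the generated types forward compatible: fields added by a newer peer survive an Unmarshal/Marshal round trip, and the XXX_DiscardUnknown method defined next drops them explicitly. A short sketch; the wire bytes stand in for data received from a peer with a newer schema:

package main

import (
	"fmt"
	"log"

	"github.com/gogo/protobuf/proto"

	workflowtemplate "github.com/argoproj/argo/cmd/server/workflowtemplate"
)

func main() {
	// Placeholder for bytes produced by a newer peer; an empty payload
	// simply decodes to an empty message.
	var wire []byte

	msg := &workflowtemplate.WorkflowDeleteResponse{}
	if err := proto.Unmarshal(wire, msg); err != nil {
		log.Fatal(err)
	}
	// Tags the decoder does not recognize are retained here and re-emitted
	// by Marshal, unless dropped explicitly.
	fmt.Printf("%d unknown bytes retained\n", len(msg.XXX_unrecognized))
	msg.XXX_DiscardUnknown()
}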
+func (m *WorkflowDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WorkflowDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WorkflowDeleteResponse proto.InternalMessageInfo + +func (m *WorkflowDeleteResponse) GetTemplateName() string { + if m != nil { + return m.TemplateName + } + return "" +} + +func (m *WorkflowDeleteResponse) GetStatus() string { + if m != nil { + return m.Status + } + return "" +} + +func init() { + proto.RegisterType((*SubmitOptions)(nil), "workflowtemplate.SubmitOptions") + proto.RegisterType((*WorkflowTemplateCreateRequest)(nil), "workflowtemplate.WorkflowTemplateCreateRequest") + proto.RegisterType((*WorkflowTemplateGetRequest)(nil), "workflowtemplate.WorkflowTemplateGetRequest") + proto.RegisterType((*WorkflowTemplateListRequest)(nil), "workflowtemplate.WorkflowTemplateListRequest") + proto.RegisterType((*WorkflowTemplateUpdateRequest)(nil), "workflowtemplate.WorkflowTemplateUpdateRequest") + proto.RegisterType((*WorkflowTemplateDeleteRequest)(nil), "workflowtemplate.WorkflowTemplateDeleteRequest") + proto.RegisterType((*WorkflowDeleteResponse)(nil), "workflowtemplate.WorkflowDeleteResponse") +} + +func init() { + proto.RegisterFile("cmd/server/workflowtemplate/workflow-template.proto", fileDescriptor_ffa03b43b9ae30de) +} + +var fileDescriptor_ffa03b43b9ae30de = []byte{ + // 714 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x56, 0xcd, 0x6a, 0x14, 0x4b, + 0x14, 0xa6, 0x26, 0x61, 0xee, 0xa4, 0x72, 0x07, 0x2e, 0xc5, 0x25, 0x77, 0x98, 0x1b, 0x63, 0x68, + 0x90, 0x84, 0xc1, 0x54, 0x65, 0x92, 0x28, 0x12, 0x5c, 0xe5, 0x87, 0x20, 0x04, 0x95, 0x9e, 0x88, + 0xc4, 0x5d, 0xa5, 0xe7, 0xd8, 0x69, 0x67, 0xba, 0xab, 0xed, 0xaa, 0x99, 0xa0, 0x21, 0x1b, 0x9f, + 0x40, 0x70, 0xe1, 0xda, 0x27, 0x50, 0x04, 0xc1, 0x85, 0x2b, 0x57, 0x2e, 0x05, 0x5f, 0x40, 0x82, + 0x0f, 0x22, 0x5d, 0xd3, 0x3d, 0xfd, 0x33, 0x26, 0x69, 0xe2, 0x80, 0xbb, 0xfa, 0xe9, 0xf3, 0x9d, + 0xef, 0xfb, 0xea, 0x9c, 0xea, 0xc2, 0xab, 0x96, 0xdb, 0x66, 0x12, 0x82, 0x3e, 0x04, 0xec, 0x48, + 0x04, 0x9d, 0xc7, 0x5d, 0x71, 0xa4, 0xc0, 0xf5, 0xbb, 0x5c, 0xc1, 0x70, 0x61, 0x29, 0x5e, 0xa1, + 0x7e, 0x20, 0x94, 0x20, 0xff, 0xe4, 0xbf, 0xac, 0xff, 0x6b, 0x0b, 0x5b, 0xe8, 0x4d, 0x16, 0x8e, + 0x06, 0xdf, 0xd5, 0x67, 0x6d, 0x21, 0xec, 0x2e, 0x30, 0xee, 0x3b, 0x8c, 0x7b, 0x9e, 0x50, 0x5c, + 0x39, 0xc2, 0x93, 0xd1, 0xee, 0x5a, 0xe7, 0x96, 0xa4, 0x8e, 0x08, 0x77, 0x5d, 0x6e, 0x1d, 0x3a, + 0x1e, 0x04, 0xcf, 0x98, 0xdf, 0xb1, 0xc3, 0x05, 0xc9, 0x5c, 0x50, 0x9c, 0xf5, 0x9b, 0xcc, 0x06, + 0x0f, 0x02, 0xae, 0xa0, 0x1d, 0x45, 0x6d, 0xda, 0x8e, 0x3a, 0xec, 0x1d, 0x50, 0x4b, 0xb8, 0x8c, + 0x07, 0x3a, 0xe9, 0x13, 0x3d, 0x48, 0x42, 0x63, 0x76, 0xac, 0xdf, 0xe4, 0x5d, 0xff, 0x90, 0x8f, + 0x82, 0x18, 0x49, 0x6a, 0x66, 0x89, 0x00, 0x7e, 0x91, 0xc8, 0x58, 0xc0, 0xd5, 0x56, 0xef, 0xc0, + 0x75, 0xd4, 0x3d, 0x5f, 0xb3, 0x26, 0x33, 0xb8, 0xdc, 0x52, 0x81, 0x63, 0xa9, 0x1a, 0x9a, 0x47, + 0x8b, 0x15, 0x33, 0x9a, 0x19, 0x9f, 0x4b, 0xf8, 0xca, 0xc3, 0x28, 0xe5, 0x5e, 0x64, 0xc8, 0x66, + 0x00, 0x5c, 0x81, 0x09, 0x4f, 0x7b, 0x20, 0x15, 0x99, 0xc5, 0x53, 0x1e, 0x77, 0x41, 0xfa, 0xdc, + 0x02, 0x1d, 0x3c, 0x65, 0x26, 0x0b, 0x84, 0xe3, 0x4a, 0xec, 0x63, 0xad, 0x34, 0x8f, 0x16, 0xa7, + 0x57, 0xb6, 0x69, 0x22, 0x92, 0xc6, 0x22, 0xf5, 0x80, 0xfa, 0x1d, 0x9b, 0x86, 0x22, 0x69, 0x2c, + 0x92, 0xc6, 0x22, 0x69, 0x9e, 0x83, 0x39, 0x84, 0x25, 0xfb, 0xb8, 0x6a, 0x69, 0x46, 0x91, 0x96, + 0xda, 0x84, 0xce, 0xb3, 0x4a, 0x07, 0x3e, 0xd0, 0xf4, 0x11, 0x24, 0x29, 0xc2, 0x23, 0xa0, 0xfd, + 0x26, 0xdd, 
0x4c, 0x87, 0x9a, 0x59, 0x24, 0xb2, 0x8d, 0xab, 0x32, 0x6d, 0x53, 0x6d, 0x52, 0x43, + 0x5f, 0xa5, 0xf9, 0x1a, 0xa1, 0x19, 0x37, 0xcd, 0x6c, 0x94, 0xf1, 0x0e, 0xe1, 0x7a, 0x5e, 0xc0, + 0x0e, 0xa8, 0xd8, 0x41, 0x03, 0xff, 0x1d, 0xe3, 0xdc, 0xe5, 0x6e, 0x6c, 0x62, 0x66, 0x2d, 0xeb, + 0x72, 0x29, 0xef, 0xf2, 0x7d, 0x8c, 0x6d, 0x50, 0x59, 0xfd, 0xcb, 0xc5, 0xf4, 0xef, 0x0c, 0xe3, + 0xcc, 0x14, 0x86, 0xf1, 0x12, 0xe1, 0xff, 0xf3, 0x94, 0x77, 0x1d, 0xa9, 0x8a, 0x9d, 0x7a, 0x0b, + 0x4f, 0x77, 0x1d, 0x39, 0x24, 0x34, 0x38, 0xf8, 0x66, 0x31, 0x42, 0xbb, 0x49, 0xa0, 0x99, 0x46, + 0x31, 0x4e, 0x46, 0x2b, 0xf1, 0x81, 0xdf, 0x4e, 0x55, 0xe2, 0xef, 0xfb, 0x58, 0xc7, 0x15, 0x17, + 0x5c, 0xe1, 0x3c, 0x87, 0xb6, 0x76, 0xb1, 0x62, 0x0e, 0xe7, 0xc6, 0x27, 0x34, 0x9a, 0x7f, 0x0b, + 0xba, 0x30, 0xce, 0xfc, 0xfb, 0xb8, 0xda, 0xd6, 0x90, 0x97, 0x2a, 0xe5, 0xad, 0x74, 0xa8, 0x99, + 0x45, 0x32, 0xf6, 0xf0, 0x4c, 0xcc, 0x3e, 0x66, 0x2d, 0x7d, 0xe1, 0x49, 0x28, 0x44, 0x7b, 0x06, + 0x97, 0xa5, 0xe2, 0xaa, 0x27, 0x23, 0xce, 0xd1, 0x6c, 0xe5, 0xf5, 0x5f, 0xf8, 0xbf, 0xbc, 0x29, + 0x2d, 0x08, 0xfa, 0x8e, 0x05, 0xe4, 0x3d, 0xc2, 0xe5, 0x41, 0x77, 0x11, 0x36, 0xda, 0x30, 0xe7, + 0x5e, 0x2a, 0xf5, 0xf1, 0x5c, 0x12, 0xc6, 0xf2, 0x8b, 0x6f, 0x3f, 0x5e, 0x95, 0x1a, 0xc6, 0x35, + 0x7d, 0x19, 0xf6, 0x9b, 0x23, 0x7f, 0x01, 0xc9, 0x8e, 0x87, 0xee, 0x9f, 0xac, 0xa3, 0x06, 0xf9, + 0x88, 0xf0, 0xc4, 0x0e, 0x28, 0x72, 0xfd, 0x62, 0xc6, 0x49, 0x07, 0x8f, 0x8b, 0xee, 0x6d, 0x4d, + 0xf7, 0x26, 0x59, 0x2b, 0x44, 0x97, 0x1d, 0xa7, 0x8f, 0xe8, 0x84, 0xbc, 0x45, 0x78, 0x32, 0x6c, + 0x1e, 0xb2, 0x74, 0x31, 0xf7, 0x54, 0x2b, 0xd7, 0xef, 0x8c, 0x85, 0x7c, 0x88, 0x68, 0x2c, 0x69, + 0x01, 0x0b, 0xa4, 0x98, 0xdf, 0xe4, 0x0d, 0xc2, 0xe5, 0x41, 0x31, 0x16, 0xa9, 0x90, 0x4c, 0xb3, + 0xd5, 0x17, 0xcf, 0x0e, 0xc8, 0xd6, 0x77, 0xec, 0x6a, 0xe3, 0x72, 0xae, 0x7e, 0xd0, 0xae, 0x7a, + 0xea, 0x8f, 0xd5, 0xf0, 0x0d, 0x4d, 0x9f, 0x19, 0x8d, 0x62, 0xf4, 0xbb, 0x8e, 0xa7, 0xd6, 0x51, + 0x63, 0x63, 0xe3, 0xcb, 0xe9, 0x1c, 0xfa, 0x7a, 0x3a, 0x87, 0xbe, 0x9f, 0xce, 0xa1, 0x47, 0x6b, + 0x67, 0x3e, 0x2c, 0xce, 0x79, 0x22, 0x1d, 0x94, 0xf5, 0x63, 0x61, 0xf5, 0x67, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xc5, 0xf4, 0x79, 0x6c, 0x48, 0x09, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// WorkflowTemplateServiceClient is the client API for WorkflowTemplateService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
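Calling this service from Go is the standard gRPC client flow against the WorkflowTemplateServiceClient interface declared just below. A hedged sketch; the address, the insecure dial option, and the namespace/template names are placeholders, nothing this patch configures:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	workflowtemplate "github.com/argoproj/argo/cmd/server/workflowtemplate"
)

func main() {
	conn, err := grpc.Dial("localhost:2746", grpc.WithInsecure()) // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := workflowtemplate.NewWorkflowTemplateServiceClient(conn)
	tmpl, err := client.Get(context.Background(), &workflowtemplate.WorkflowTemplateGetRequest{
		Namespace:    "argo",        // placeholder
		TemplateName: "hello-world", // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got WorkflowTemplate %s", tmpl.Name)
}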
+type WorkflowTemplateServiceClient interface { + Create(ctx context.Context, in *WorkflowTemplateCreateRequest, opts ...grpc.CallOption) (*v1alpha1.WorkflowTemplate, error) + Get(ctx context.Context, in *WorkflowTemplateGetRequest, opts ...grpc.CallOption) (*v1alpha1.WorkflowTemplate, error) + List(ctx context.Context, in *WorkflowTemplateListRequest, opts ...grpc.CallOption) (*v1alpha1.WorkflowTemplateList, error) + Delete(ctx context.Context, in *WorkflowTemplateDeleteRequest, opts ...grpc.CallOption) (*WorkflowDeleteResponse, error) + Lint(ctx context.Context, in *WorkflowTemplateCreateRequest, opts ...grpc.CallOption) (*v1alpha1.WorkflowTemplate, error) +} + +type workflowTemplateServiceClient struct { + cc *grpc.ClientConn +} + +func NewWorkflowTemplateServiceClient(cc *grpc.ClientConn) WorkflowTemplateServiceClient { + return &workflowTemplateServiceClient{cc} +} + +func (c *workflowTemplateServiceClient) Create(ctx context.Context, in *WorkflowTemplateCreateRequest, opts ...grpc.CallOption) (*v1alpha1.WorkflowTemplate, error) { + out := new(v1alpha1.WorkflowTemplate) + err := c.cc.Invoke(ctx, "/workflowtemplate.WorkflowTemplateService/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowTemplateServiceClient) Get(ctx context.Context, in *WorkflowTemplateGetRequest, opts ...grpc.CallOption) (*v1alpha1.WorkflowTemplate, error) { + out := new(v1alpha1.WorkflowTemplate) + err := c.cc.Invoke(ctx, "/workflowtemplate.WorkflowTemplateService/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowTemplateServiceClient) List(ctx context.Context, in *WorkflowTemplateListRequest, opts ...grpc.CallOption) (*v1alpha1.WorkflowTemplateList, error) { + out := new(v1alpha1.WorkflowTemplateList) + err := c.cc.Invoke(ctx, "/workflowtemplate.WorkflowTemplateService/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowTemplateServiceClient) Delete(ctx context.Context, in *WorkflowTemplateDeleteRequest, opts ...grpc.CallOption) (*WorkflowDeleteResponse, error) { + out := new(WorkflowDeleteResponse) + err := c.cc.Invoke(ctx, "/workflowtemplate.WorkflowTemplateService/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowTemplateServiceClient) Lint(ctx context.Context, in *WorkflowTemplateCreateRequest, opts ...grpc.CallOption) (*v1alpha1.WorkflowTemplate, error) { + out := new(v1alpha1.WorkflowTemplate) + err := c.cc.Invoke(ctx, "/workflowtemplate.WorkflowTemplateService/Lint", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// WorkflowTemplateServiceServer is the server API for WorkflowTemplateService service. +type WorkflowTemplateServiceServer interface { + Create(context.Context, *WorkflowTemplateCreateRequest) (*v1alpha1.WorkflowTemplate, error) + Get(context.Context, *WorkflowTemplateGetRequest) (*v1alpha1.WorkflowTemplate, error) + List(context.Context, *WorkflowTemplateListRequest) (*v1alpha1.WorkflowTemplateList, error) + Delete(context.Context, *WorkflowTemplateDeleteRequest) (*WorkflowDeleteResponse, error) + Lint(context.Context, *WorkflowTemplateCreateRequest) (*v1alpha1.WorkflowTemplate, error) +} + +// UnimplementedWorkflowTemplateServiceServer can be embedded to have forward compatible implementations. 
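The UnimplementedWorkflowTemplateServiceServer defined next is meant to be embedded: a concrete server then satisfies the full WorkflowTemplateServiceServer interface and overrides only the RPCs it implements, so adding methods to the service later does not break existing servers at compile time. A sketch of a server that implements only Get; the port and the returned template are illustrative:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	"github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
	workflowtemplate "github.com/argoproj/argo/cmd/server/workflowtemplate"
)

// templateServer embeds the Unimplemented type, so every RPC except the
// overridden Get answers codes.Unimplemented.
type templateServer struct {
	workflowtemplate.UnimplementedWorkflowTemplateServiceServer
}

func (s *templateServer) Get(ctx context.Context, req *workflowtemplate.WorkflowTemplateGetRequest) (*v1alpha1.WorkflowTemplate, error) {
	tmpl := &v1alpha1.WorkflowTemplate{}
	tmpl.Name = req.TemplateName
	return tmpl, nil
}

func main() {
	lis, err := net.Listen("tcp", ":2746") // placeholder port
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	workflowtemplate.RegisterWorkflowTemplateServiceServer(srv, &templateServer{})
	log.Fatal(srv.Serve(lis))
}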
+type UnimplementedWorkflowTemplateServiceServer struct { +} + +func (*UnimplementedWorkflowTemplateServiceServer) Create(ctx context.Context, req *WorkflowTemplateCreateRequest) (*v1alpha1.WorkflowTemplate, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (*UnimplementedWorkflowTemplateServiceServer) Get(ctx context.Context, req *WorkflowTemplateGetRequest) (*v1alpha1.WorkflowTemplate, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedWorkflowTemplateServiceServer) List(ctx context.Context, req *WorkflowTemplateListRequest) (*v1alpha1.WorkflowTemplateList, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (*UnimplementedWorkflowTemplateServiceServer) Delete(ctx context.Context, req *WorkflowTemplateDeleteRequest) (*WorkflowDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (*UnimplementedWorkflowTemplateServiceServer) Lint(ctx context.Context, req *WorkflowTemplateCreateRequest) (*v1alpha1.WorkflowTemplate, error) { + return nil, status.Errorf(codes.Unimplemented, "method Lint not implemented") +} + +func RegisterWorkflowTemplateServiceServer(s *grpc.Server, srv WorkflowTemplateServiceServer) { + s.RegisterService(&_WorkflowTemplateService_serviceDesc, srv) +} + +func _WorkflowTemplateService_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowTemplateCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowTemplateServiceServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflowtemplate.WorkflowTemplateService/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowTemplateServiceServer).Create(ctx, req.(*WorkflowTemplateCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowTemplateService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowTemplateGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowTemplateServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflowtemplate.WorkflowTemplateService/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowTemplateServiceServer).Get(ctx, req.(*WorkflowTemplateGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowTemplateService_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowTemplateListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowTemplateServiceServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflowtemplate.WorkflowTemplateService/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowTemplateServiceServer).List(ctx, req.(*WorkflowTemplateListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_WorkflowTemplateService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowTemplateDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowTemplateServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflowtemplate.WorkflowTemplateService/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowTemplateServiceServer).Delete(ctx, req.(*WorkflowTemplateDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowTemplateService_Lint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WorkflowTemplateCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowTemplateServiceServer).Lint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/workflowtemplate.WorkflowTemplateService/Lint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowTemplateServiceServer).Lint(ctx, req.(*WorkflowTemplateCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _WorkflowTemplateService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "workflowtemplate.WorkflowTemplateService", + HandlerType: (*WorkflowTemplateServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Create", + Handler: _WorkflowTemplateService_Create_Handler, + }, + { + MethodName: "Get", + Handler: _WorkflowTemplateService_Get_Handler, + }, + { + MethodName: "List", + Handler: _WorkflowTemplateService_List_Handler, + }, + { + MethodName: "Delete", + Handler: _WorkflowTemplateService_Delete_Handler, + }, + { + MethodName: "Lint", + Handler: _WorkflowTemplateService_Lint_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cmd/server/workflowtemplate/workflow-template.proto", +} + +func (m *SubmitOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubmitOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubmitOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Strict { + i-- + if m.Strict { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WorkflowTemplateCreateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTemplateCreateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTemplateCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.SubmitOptions != nil { + { + size, err := 
m.SubmitOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.CreateOptions != nil { + { + size, err := m.CreateOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Template != nil { + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowTemplateGetRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTemplateGetRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTemplateGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.GetOptions != nil { + { + size, err := m.GetOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.TemplateName) > 0 { + i -= len(m.TemplateName) + copy(dAtA[i:], m.TemplateName) + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(len(m.TemplateName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowTemplateListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTemplateListRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTemplateListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ListOptions != nil { + { + size, err := m.ListOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowTemplateUpdateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTemplateUpdateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTemplateUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Memoized { + i-- + if m.Memoized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.TemplateName) > 0 { + i -= len(m.TemplateName) + copy(dAtA[i:], m.TemplateName) + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(len(m.TemplateName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowTemplateDeleteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowTemplateDeleteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowTemplateDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.DeleteOptions != nil { + { + size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + } + if len(m.TemplateName) > 0 { + i -= len(m.TemplateName) + copy(dAtA[i:], m.TemplateName) + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(len(m.TemplateName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WorkflowDeleteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowDeleteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WorkflowDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Status) > 0 { + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + } + if len(m.TemplateName) > 0 { + i -= len(m.TemplateName) + copy(dAtA[i:], m.TemplateName) + i = encodeVarintWorkflowTemplate(dAtA, i, uint64(len(m.TemplateName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintWorkflowTemplate(dAtA []byte, offset int, v uint64) int { + offset -= sovWorkflowTemplate(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SubmitOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Strict { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*WorkflowTemplateCreateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + if m.CreateOptions != nil { + l = m.CreateOptions.Size() + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + if m.SubmitOptions != nil { + l = m.SubmitOptions.Size() + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WorkflowTemplateGetRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TemplateName) + if l > 0 { + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + if m.GetOptions != nil { + l = m.GetOptions.Size() + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WorkflowTemplateListRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + if m.ListOptions != nil { + l = m.ListOptions.Size() + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WorkflowTemplateUpdateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TemplateName) + if l > 0 { + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + if m.Memoized { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WorkflowTemplateDeleteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TemplateName) + if l > 0 { + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + if m.DeleteOptions != nil { + l = m.DeleteOptions.Size() + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WorkflowDeleteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TemplateName) + if l > 0 { + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + l = len(m.Status) + if l > 0 { + n += 1 + l + sovWorkflowTemplate(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovWorkflowTemplate(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWorkflowTemplate(x uint64) (n int) { + return sovWorkflowTemplate(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *SubmitOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubmitOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubmitOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
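// Editorial note: a worked example of the tag decoding above. Every protobuf
// key is a varint whose low three bits carry the wire type, so
// fieldNum = wire >> 3 and wireType = wire & 0x7. For SubmitOptions.Strict
// (field 1, varint wire type 0) the key byte is (1 << 3) | 0 = 0x08, which is
// exactly the 0x8 the marshaler earlier in this file writes. Varint widths
// follow sovWorkflowTemplate above: a length of 300 needs two 7-bit groups,
// so sovWorkflowTemplate(300) == 2.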
switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Strict", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Strict = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowTemplateCreateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowTemplateCreateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowTemplateCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &v1alpha1.WorkflowTemplate{} + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreateOptions == nil { + m.CreateOptions = &v1.CreateOptions{} + } + if err := m.CreateOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubmitOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SubmitOptions == nil { + m.SubmitOptions = &SubmitOptions{} + } + if err := m.SubmitOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowTemplateGetRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowTemplateGetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowTemplateGetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TemplateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GetOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GetOptions == nil { + m.GetOptions = &v1.GetOptions{} + } + if err := m.GetOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowTemplateListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowTemplateListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowTemplateListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.ListOptions == nil { + m.ListOptions = &v1.ListOptions{} + } + if err := m.ListOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowTemplateUpdateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowTemplateUpdateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowTemplateUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TemplateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Memoized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Memoized = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowTemplateDeleteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowTemplateDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowTemplateDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TemplateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteOptions == nil { + m.DeleteOptions = &v1.DeleteOptions{} + } + if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowDeleteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowDeleteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TemplateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorkflowTemplate + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorkflowTemplate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthWorkflowTemplate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
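// Editorial note: the append above is the unknown-field passthrough. Bytes
// for field numbers this version does not recognize are measured with
// skipWorkflowTemplate and stashed in XXX_unrecognized, and the marshalers
// earlier in this file copy them back out, so unknown fields survive an
// Unmarshal/Marshal round trip instead of being silently dropped when an
// older binary handles a message produced by a newer schema.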
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWorkflowTemplate(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWorkflowTemplate + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthWorkflowTemplate + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWorkflowTemplate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWorkflowTemplate(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthWorkflowTemplate + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthWorkflowTemplate = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWorkflowTemplate = fmt.Errorf("proto: integer overflow") +) diff --git a/cmd/server/workflowtemplate/workflow-template.pb.gw.go b/cmd/server/workflowtemplate/workflow-template.pb.gw.go new file mode 100644 index 000000000000..7cccaccecc80 --- /dev/null +++ b/cmd/server/workflowtemplate/workflow-template.pb.gw.go @@ -0,0 +1,693 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: cmd/server/workflowtemplate/workflow-template.proto + +/* +Package workflowtemplate is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
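Editorial note: from the pattern_ definitions and mux.Handle registrations
below, the generated REST routes map onto the gRPC methods as follows:

	POST   /api/v1/workflowtemplates/{namespace}                -> Create
	GET    /api/v1/workflowtemplates/{namespace}                -> List
	GET    /api/v1/workflowtemplates/{namespace}/{templateName} -> Get
	DELETE /api/v1/workflowtemplates/{namespace}/{templateName} -> Delete
	POST   /api/v1/workflowtemplates/{namespace}/lint           -> Lint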
+*/ +package workflowtemplate + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_WorkflowTemplateService_Create_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowTemplateServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowTemplateCreateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := client.Create(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowTemplateService_Create_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowTemplateServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowTemplateCreateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := server.Create(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowTemplateService_Get_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "templateName": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_WorkflowTemplateService_Get_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowTemplateServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowTemplateGetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = 
runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["templateName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "templateName") + } + + protoReq.TemplateName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "templateName", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowTemplateService_Get_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Get(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowTemplateService_Get_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowTemplateServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowTemplateGetRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["templateName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "templateName") + } + + protoReq.TemplateName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "templateName", err) + } + + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WorkflowTemplateService_Get_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Get(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowTemplateService_List_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_WorkflowTemplateService_List_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowTemplateServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowTemplateListRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowTemplateService_List_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err 
:= client.List(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowTemplateService_List_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowTemplateServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowTemplateListRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WorkflowTemplateService_List_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.List(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_WorkflowTemplateService_Delete_0 = &utilities.DoubleArray{Encoding: map[string]int{"namespace": 0, "templateName": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_WorkflowTemplateService_Delete_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowTemplateServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowTemplateDeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["templateName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "templateName") + } + + protoReq.TemplateName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "templateName", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_WorkflowTemplateService_Delete_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Delete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowTemplateService_Delete_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowTemplateServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowTemplateDeleteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type 
mismatch, parameter: %s, error: %v", "namespace", err) + } + + val, ok = pathParams["templateName"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "templateName") + } + + protoReq.TemplateName, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "templateName", err) + } + + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_WorkflowTemplateService_Delete_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Delete(ctx, &protoReq) + return msg, metadata, err + +} + +func request_WorkflowTemplateService_Lint_0(ctx context.Context, marshaler runtime.Marshaler, client WorkflowTemplateServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowTemplateCreateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := client.Lint(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_WorkflowTemplateService_Lint_0(ctx context.Context, marshaler runtime.Marshaler, server WorkflowTemplateServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq WorkflowTemplateCreateRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + + msg, err := server.Lint(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterWorkflowTemplateServiceHandlerServer registers the http handlers for service WorkflowTemplateService to "mux". +// UnaryRPC :call WorkflowTemplateServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
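// Editorial note: a minimal sketch of serving these handlers in-process,
// without a gRPC hop, using the registration function below. The server
// implementation (myTemplateServer) and listen address are hypothetical.
//
//	mux := runtime.NewServeMux()
//	err := RegisterWorkflowTemplateServiceHandlerServer(context.Background(), mux, &myTemplateServer{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Fatal(http.ListenAndServe(":8080", mux))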
+func RegisterWorkflowTemplateServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server WorkflowTemplateServiceServer) error { + + mux.Handle("POST", pattern_WorkflowTemplateService_Create_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowTemplateService_Create_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowTemplateService_Create_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowTemplateService_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowTemplateService_Get_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowTemplateService_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_WorkflowTemplateService_List_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowTemplateService_List_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowTemplateService_List_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_WorkflowTemplateService_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowTemplateService_Delete_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowTemplateService_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
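// Editorial note: the closures in this function annotate the context with
// runtime.AnnotateIncomingContext because the request is handled in-process,
// whereas the client-backed registration further below uses
// runtime.AnnotateContext, which prepares outgoing metadata for a real gRPC
// call. That, plus calling the local_request_ helpers instead of the
// client-backed request_ helpers, is what distinguishes the two paths.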
+ + }) + + mux.Handle("POST", pattern_WorkflowTemplateService_Lint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_WorkflowTemplateService_Lint_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_WorkflowTemplateService_Lint_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterWorkflowTemplateServiceHandlerFromEndpoint is same as RegisterWorkflowTemplateServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterWorkflowTemplateServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterWorkflowTemplateServiceHandler(ctx, mux, conn) +} + +// RegisterWorkflowTemplateServiceHandler registers the http handlers for service WorkflowTemplateService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterWorkflowTemplateServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterWorkflowTemplateServiceHandlerClient(ctx, mux, NewWorkflowTemplateServiceClient(conn)) +} + +// RegisterWorkflowTemplateServiceHandlerClient registers the http handlers for service WorkflowTemplateService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "WorkflowTemplateServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WorkflowTemplateServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "WorkflowTemplateServiceClient" to call the correct interceptors. 
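// Editorial note: a minimal sketch of the endpoint-dialing variant defined
// above, proxying REST requests onto a separately running gRPC server. The
// two addresses and the insecure dial option are hypothetical.
//
//	mux := runtime.NewServeMux()
//	opts := []grpc.DialOption{grpc.WithInsecure()}
//	err := RegisterWorkflowTemplateServiceHandlerFromEndpoint(context.Background(), mux, "localhost:9090", opts)
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Fatal(http.ListenAndServe(":8080", mux))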
+func RegisterWorkflowTemplateServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client WorkflowTemplateServiceClient) error {
+
+	mux.Handle("POST", pattern_WorkflowTemplateService_Create_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_WorkflowTemplateService_Create_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_WorkflowTemplateService_Create_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("GET", pattern_WorkflowTemplateService_Get_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_WorkflowTemplateService_Get_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_WorkflowTemplateService_Get_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("GET", pattern_WorkflowTemplateService_List_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_WorkflowTemplateService_List_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_WorkflowTemplateService_List_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("DELETE", pattern_WorkflowTemplateService_Delete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_WorkflowTemplateService_Delete_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_WorkflowTemplateService_Delete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	mux.Handle("POST", pattern_WorkflowTemplateService_Lint_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		rctx, err := runtime.AnnotateContext(ctx, mux, req)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_WorkflowTemplateService_Lint_0(rctx, inboundMarshaler, client, req, pathParams)
+		ctx = runtime.NewServerMetadataContext(ctx, md)
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+
+		forward_WorkflowTemplateService_Lint_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_WorkflowTemplateService_Create_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflowtemplates", "namespace"}, "", runtime.AssumeColonVerbOpt(true)))
+
+	pattern_WorkflowTemplateService_Get_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflowtemplates", "namespace", "templateName"}, "", runtime.AssumeColonVerbOpt(true)))
+
+	pattern_WorkflowTemplateService_List_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "workflowtemplates", "namespace"}, "", runtime.AssumeColonVerbOpt(true)))
+
+	pattern_WorkflowTemplateService_Delete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4}, []string{"api", "v1", "workflowtemplates", "namespace", "templateName"}, "", runtime.AssumeColonVerbOpt(true)))
+
+	pattern_WorkflowTemplateService_Lint_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "workflowtemplates", "namespace", "lint"}, "", runtime.AssumeColonVerbOpt(true)))
+)
+
+var (
+	forward_WorkflowTemplateService_Create_0 = runtime.ForwardResponseMessage
+
+	forward_WorkflowTemplateService_Get_0 = runtime.ForwardResponseMessage
+
+	forward_WorkflowTemplateService_List_0 = runtime.ForwardResponseMessage
+
+	forward_WorkflowTemplateService_Delete_0 = runtime.ForwardResponseMessage
+
+	forward_WorkflowTemplateService_Lint_0 = runtime.ForwardResponseMessage
+)
diff --git a/cmd/server/workflowtemplate/workflow-template.proto b/cmd/server/workflowtemplate/workflow-template.proto
new file mode 100644
index 000000000000..de5228d2bb98
--- /dev/null
+++ b/cmd/server/workflowtemplate/workflow-template.proto
@@ -0,0 +1,90 @@
+syntax = "proto3";
+option go_package = "github.com/argoproj/argo/cmd/server/workflowtemplate";
+
+
+import "gogoproto/gogo.proto";
+import "google/api/annotations.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1/generated.proto";
+import "k8s.io/api/core/v1/generated.proto";
+
+// WorkflowTemplate Service
+//
+// WorkflowTemplate Service API performs CRUD actions against workflow template resources
+package workflowtemplate;
+
+message SubmitOptions {
+    bool Strict = 1;
+}
+
+message WorkflowTemplateCreateRequest {
+    string namespace = 1;
+    github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowTemplate template = 2;
+    k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions createOptions = 3;
+    SubmitOptions submitOptions = 4;
+}
+
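+// Illustrative only, not part of the API definition: given the HTTP rules
+// declared in the service below, a Create call maps to a POST whose body
+// carries WorkflowTemplateCreateRequest as JSON. The namespace and template
+// name here are placeholders.
+//
+//   POST /api/v1/workflowtemplates/default
+//   {"template": {"metadata": {"generateName": "example-template-"}}}
+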
+message WorkflowTemplateGetRequest {
+    string templateName = 1;
+    string namespace = 2;
+    k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions getOptions = 3;
+}
+
+message WorkflowTemplateListRequest {
+    string namespace = 1;
+    k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions listOptions = 2;
+}
+
+message WorkflowTemplateUpdateRequest {
+    string templateName = 1;
+    string namespace = 2;
+    bool memoized = 3;
+}
+
+message WorkflowTemplateDeleteRequest {
+    string templateName = 1;
+    string namespace = 2;
+    k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 3;
+}
+
+message WorkflowDeleteResponse {
+    string templateName = 1;
+    string status = 2;
+}
+
+service WorkflowTemplateService {
+    rpc Create (WorkflowTemplateCreateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowTemplate) {
+        option (google.api.http) = {
+            post: "/api/v1/workflowtemplates/{namespace}"
+            body: "*"
+        };
+    }
+
+    rpc Get (WorkflowTemplateGetRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowTemplate) {
+        option (google.api.http).get = "/api/v1/workflowtemplates/{namespace}/{templateName}";
+    }
+
+    rpc List (WorkflowTemplateListRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowTemplateList) {
+        option (google.api.http).get = "/api/v1/workflowtemplates/{namespace}";
+    }
+
+    rpc Delete (WorkflowTemplateDeleteRequest) returns (WorkflowDeleteResponse) {
+        option (google.api.http).delete = "/api/v1/workflowtemplates/{namespace}/{templateName}";
+    }
+
+    rpc Lint (WorkflowTemplateCreateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.WorkflowTemplate) {
+        option (google.api.http) = {
+            post: "/api/v1/workflowtemplates/{namespace}/lint"
+            body: "*"
+        };
+    }
+
+}
\ No newline at end of file
diff --git a/cmd/server/workflowtemplate/workflow-template.swagger.json b/cmd/server/workflowtemplate/workflow-template.swagger.json
new file mode 100644
index 000000000000..9a51bc36d808
--- /dev/null
+++ b/cmd/server/workflowtemplate/workflow-template.swagger.json
@@ -0,0 +1,3571 @@
+{
+  "swagger": "2.0",
+  "info": {
+    "title": "WorkflowTemplate Service",
+    "description": "WorkflowTemplate Service API performs CRUD actions against workflow template resources",
+    "version": "version not set"
+  },
+  "schemes": [
+    "http",
+    "https"
+  ],
+  "consumes": [
+    "application/json"
+  ],
+  "produces": [
+    "application/json"
+  ],
+  "paths": {
+    "/api/v1/workflowtemplates/{namespace}": {
+      "get": {
+        "operationId": "List",
+        "responses": {
+          "200": {
+            "description": "A successful response.",
+            "schema": {
+              "$ref": "#/definitions/v1alpha1WorkflowTemplateList"
+            }
+          }
+        },
+        "parameters": [
+          {
+            "name": "namespace",
+            "in": "path",
+            "required": true,
+            "type": "string"
+          },
+          {
+            "name": "listOptions.labelSelector",
+            "description": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional.",
+            "in": "query",
+            "required": false,
+            "type": "string"
+          },
+          {
+            "name": "listOptions.fieldSelector",
+            "description": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional.",
+            "in": "query",
+            "required": false,
+            "type": "string"
+          },
+          {
+            "name": "listOptions.watch",
+            "description": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. 
Specify resourceVersion.\n+optional.", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "listOptions.allowWatchBookmarks", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\nIf the feature gate WatchBookmarks is not enabled in apiserver,\nthis field is ignored.\n\nThis field is beta.\n\n+optional", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "listOptions.resourceVersion", + "description": "When specified with a watch call, shows changes that occur after that particular version of a resource.\nDefaults to changes from the beginning of history.\nWhen specified for list:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.\n+optional.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "listOptions.timeoutSeconds", + "description": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "listOptions.limit", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested amount of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "listOptions.continue", + "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. 
If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart their list without the continue field. Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent from the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications.", + "in": "query", + "required": false, + "type": "string" + } + ], + "tags": [ + "WorkflowTemplateService" + ] + }, + "post": { + "operationId": "Create", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1WorkflowTemplate" + } + } + }, + "parameters": [ + { + "name": "namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/workflowtemplateWorkflowTemplateCreateRequest" + } + } + ], + "tags": [ + "WorkflowTemplateService" + ] + } + }, + "/api/v1/workflowtemplates/{namespace}/lint": { + "post": { + "operationId": "Lint", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1WorkflowTemplate" + } + } + }, + "parameters": [ + { + "name": "namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/workflowtemplateWorkflowTemplateCreateRequest" + } + } + ], + "tags": [ + "WorkflowTemplateService" + ] + } + }, + "/api/v1/workflowtemplates/{namespace}/{templateName}": { + "get": { + "operationId": "Get", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1WorkflowTemplate" + } + } + }, + "parameters": [ + { + "name": "namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "templateName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "getOptions.resourceVersion", + "description": "When specified:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.", + "in": "query", + "required": false, + "type": "string" + } + ], + "tags": [ + "WorkflowTemplateService" + ] + }, + "delete": { + "operationId": "Delete", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/workflowtemplateWorkflowDeleteResponse" + } + } + }, + "parameters": [ + { + "name": "namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "templateName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "deleteOptions.gracePeriodSeconds", + "description": "The duration in seconds before the object should be deleted. 
Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. zero means delete immediately.\n+optional.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "deleteOptions.preconditions.uid", + "description": "Specifies the target UID.\n+optional.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "deleteOptions.preconditions.resourceVersion", + "description": "Specifies the target ResourceVersion\n+optional.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "deleteOptions.orphanDependents", + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional.", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "deleteOptions.propagationPolicy", + "description": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional.", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "deleteOptions.dryRun", + "description": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional.", + "in": "query", + "required": false, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi" + } + ], + "tags": [ + "WorkflowTemplateService" + ] + } + } + }, + "definitions": { + "apismetav1Preconditions": { + "type": "object", + "properties": { + "uid": { + "type": "string", + "title": "Specifies the target UID.\n+optional" + }, + "resourceVersion": { + "type": "string", + "title": "Specifies the target ResourceVersion\n+optional" + } + }, + "description": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out." + }, + "intstrIntOrString": { + "type": "object", + "properties": { + "type": { + "type": "string", + "format": "int64" + }, + "intVal": { + "type": "integer", + "format": "int32" + }, + "strVal": { + "type": "string" + } + }, + "description": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true", + "title": "IntOrString is a type that can hold an int32 or a string. When used in\nJSON or YAML marshalling and unmarshalling, it produces or consumes the\ninner type. 
This allows you to have, for example, a JSON field that can\naccept a name or number.\nTODO: Rename to Int32OrString" + }, + "resourceQuantity": { + "type": "object", + "properties": { + "string": { + "type": "string" + } + }, + "description": "Quantity is a fixed-point representation of a number.\nIt provides convenient marshaling/unmarshaling in JSON and YAML,\nin addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9\n\u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e\n\u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e\n\u003csign\u003e ::= \"+\" | \"-\"\n\u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e\n\u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e\n\u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent\na number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal\nplaces. Numbers larger or more precise will be capped or rounded up.\n(E.g.: 0.1m will rounded up to 1m.)\nThis may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix\nit had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\".\nThis means that Exponent/suffix will be adjusted up or down (with a\ncorresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a\nfloating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed,\nbut will be re-emitted in their canonical form. 
(So always use canonical\nform, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without\nwriting some sort of special handling code in the hopes that that will\ncause implementors to also use a fixed point implementation.\n\n+protobuf=true\n+protobuf.embed=string\n+protobuf.options.marshal=false\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:deepcopy-gen=true\n+k8s:openapi-gen=true" + }, + "v1AWSElasticBlockStoreVolumeSource": { + "type": "object", + "properties": { + "volumeID": { + "type": "string", + "title": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + }, + "fsType": { + "type": "string", + "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "partition": { + "type": "integer", + "format": "int32", + "title": "The partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\".\nIf omitted, the default is \"false\".\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional" + } + }, + "description": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk\nmust also be in the same AWS zone as the kubelet. An AWS EBS disk\ncan only be mounted as read/write once. AWS EBS volumes support\nownership management and SELinux relabeling." + }, + "v1Affinity": { + "type": "object", + "properties": { + "nodeAffinity": { + "$ref": "#/definitions/v1NodeAffinity", + "title": "Describes node affinity scheduling rules for the pod.\n+optional" + }, + "podAffinity": { + "$ref": "#/definitions/v1PodAffinity", + "title": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).\n+optional" + }, + "podAntiAffinity": { + "$ref": "#/definitions/v1PodAntiAffinity", + "title": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).\n+optional" + } + }, + "description": "Affinity is a group of affinity scheduling rules." + }, + "v1AzureDiskVolumeSource": { + "type": "object", + "properties": { + "diskName": { + "type": "string", + "title": "The Name of the data disk in the blob storage" + }, + "diskURI": { + "type": "string", + "title": "The URI the data disk in the blob storage" + }, + "cachingMode": { + "type": "string", + "title": "Host Caching mode: None, Read Only, Read Write.\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + }, + "kind": { + "type": "string", + "title": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared" + } + }, + "description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod." + }, + "v1AzureFileVolumeSource": { + "type": "object", + "properties": { + "secretName": { + "type": "string", + "title": "the name of secret that contains Azure Storage Account Name and Key" + }, + "shareName": { + "type": "string", + "title": "Share Name" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + } + }, + "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod." + }, + "v1CSIVolumeSource": { + "type": "object", + "properties": { + "driver": { + "type": "string", + "description": "Driver is the name of the CSI driver that handles this volume.\nConsult with your admin for the correct name as registered in the cluster." + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Specifies a read-only configuration for the volume.\nDefaults to false (read/write).\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\".\nIf not provided, the empty value is passed to the associated CSI driver\nwhich will determine the default filesystem to apply.\n+optional" + }, + "volumeAttributes": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "VolumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values.\n+optional" + }, + "nodePublishSecretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "NodePublishSecretRef is a reference to the secret object containing\nsensitive information to pass to the CSI driver to complete the CSI\nNodePublishVolume and NodeUnpublishVolume calls.\nThis field is optional, and may be empty if no secret is required. If the\nsecret object contains more than one secret, all secret references are passed.\n+optional" + } + }, + "title": "Represents a source location of a volume to mount, managed by an external CSI driver" + }, + "v1Capabilities": { + "type": "object", + "properties": { + "add": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Added capabilities\n+optional" + }, + "drop": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Removed capabilities\n+optional" + } + }, + "description": "Adds and removes POSIX capabilities from running containers." 
+ }, + "v1CephFSVolumeSource": { + "type": "object", + "properties": { + "monitors": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + }, + "path": { + "type": "string", + "title": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /\n+optional" + }, + "user": { + "type": "string", + "title": "Optional: User is the rados user name, default is admin\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" + }, + "secretFile": { + "type": "string", + "title": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "Optional: SecretRef is reference to the authentication secret for User, default is empty.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it\n+optional" + } + }, + "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod\nCephfs volumes do not support ownership management or SELinux relabeling." + }, + "v1CinderVolumeSource": { + "type": "object", + "properties": { + "volumeID": { + "type": "string", + "title": "volume id used to identify the volume in cinder.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "Optional: points to a secret object containing parameters used to connect\nto OpenStack.\n+optional" + } + }, + "description": "Represents a cinder volume resource in Openstack.\nA Cinder volume must exist before mounting to a container.\nThe volume must also be in the same region as the kubelet.\nCinder volumes support ownership management and SELinux relabeling." + }, + "v1ConfigMapEnvSource": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference", + "description": "The ConfigMap to select from." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the ConfigMap must be defined\n+optional" + } + }, + "description": "ConfigMapEnvSource selects a ConfigMap to populate the environment\nvariables with.\n\nThe contents of the target ConfigMap's Data field will represent the\nkey-value pairs as environment variables." 
+ }, + "v1ConfigMapKeySelector": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference", + "description": "The ConfigMap to select from." + }, + "key": { + "type": "string", + "description": "The key to select." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the ConfigMap or its key must be defined\n+optional" + } + }, + "description": "Selects a key from a ConfigMap." + }, + "v1ConfigMapProjection": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1KeyToPath" + }, + "title": "If unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the ConfigMap or its keys must be defined\n+optional" + } + }, + "description": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a\nprojected volume as files using the keys in the Data field as the file names,\nunless the items element is populated with specific mappings of keys to paths.\nNote that this is identical to a configmap volume source without the default\nmode." + }, + "v1ConfigMapVolumeSource": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1KeyToPath" + }, + "title": "If unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + }, + "defaultMode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on created files by default. Must be a\nvalue between 0 and 0777. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the ConfigMap or its keys must be defined\n+optional" + } + }, + "description": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a\nvolume as files using the keys in the Data field as the file names, unless\nthe items element is populated with specific mappings of keys to paths.\nConfigMap volumes support ownership management and SELinux relabeling." 
+ }, + "v1Container": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the container specified as a DNS_LABEL.\nEach container in a pod must have a unique name (DNS_LABEL).\nCannot be updated." + }, + "image": { + "type": "string", + "title": "Docker image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets.\n+optional" + }, + "command": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Entrypoint array. Not executed within a shell.\nThe docker image's ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\ncan be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,\nregardless of whether the variable exists or not.\nCannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" + }, + "args": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Arguments to the entrypoint.\nThe docker image's CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container's environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax\ncan be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,\nregardless of whether the variable exists or not.\nCannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell\n+optional" + }, + "workingDir": { + "type": "string", + "title": "Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated.\n+optional" + }, + "ports": { + "type": "array", + "items": { + "$ref": "#/definitions/v1ContainerPort" + }, + "title": "List of ports to expose from the container. Exposing a port here gives\nthe system additional information about the network connections a\ncontainer uses, but is primarily informational. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default \"0.0.0.0\" address inside a container will be\naccessible from the network.\nCannot be updated.\n+optional\n+patchMergeKey=containerPort\n+patchStrategy=merge\n+listType=map\n+listMapKey=containerPort\n+listMapKey=protocol" + }, + "envFrom": { + "type": "array", + "items": { + "$ref": "#/definitions/v1EnvFromSource" + }, + "title": "List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. 
When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated.\n+optional" + }, + "env": { + "type": "array", + "items": { + "$ref": "#/definitions/v1EnvVar" + }, + "title": "List of environment variables to set in the container.\nCannot be updated.\n+optional\n+patchMergeKey=name\n+patchStrategy=merge" + }, + "resources": { + "$ref": "#/definitions/v1ResourceRequirements", + "title": "Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/\n+optional" + }, + "volumeMounts": { + "type": "array", + "items": { + "$ref": "#/definitions/v1VolumeMount" + }, + "title": "Pod volumes to mount into the container's filesystem.\nCannot be updated.\n+optional\n+patchMergeKey=mountPath\n+patchStrategy=merge" + }, + "volumeDevices": { + "type": "array", + "items": { + "$ref": "#/definitions/v1VolumeDevice" + }, + "title": "volumeDevices is the list of block devices to be used by the container.\nThis is a beta feature.\n+patchMergeKey=devicePath\n+patchStrategy=merge\n+optional" + }, + "livenessProbe": { + "$ref": "#/definitions/v1Probe", + "title": "Periodic probe of container liveness.\nContainer will be restarted if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "readinessProbe": { + "$ref": "#/definitions/v1Probe", + "title": "Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "startupProbe": { + "$ref": "#/definitions/v1Probe", + "title": "StartupProbe indicates that the Pod has successfully initialized.\nIf specified, no other probes are executed until this completes successfully.\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\nThis cannot be updated.\nThis is an alpha feature enabled by the StartupProbe feature flag.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "lifecycle": { + "$ref": "#/definitions/v1Lifecycle", + "title": "Actions that the management system should take in response to container lifecycle events.\nCannot be updated.\n+optional" + }, + "terminationMessagePath": { + "type": "string", + "title": "Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated.\n+optional" + }, + "terminationMessagePolicy": { + "type": "string", + "title": "Indicate how the termination message should be populated. 
File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated.\n+optional" + }, + "imagePullPolicy": { + "type": "string", + "title": "Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n+optional" + }, + "securityContext": { + "$ref": "#/definitions/v1SecurityContext", + "title": "Security options the pod should run with.\nMore info: https://kubernetes.io/docs/concepts/policy/security-context/\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/\n+optional" + }, + "stdin": { + "type": "boolean", + "format": "boolean", + "title": "Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false.\n+optional" + }, + "stdinOnce": { + "type": "boolean", + "format": "boolean", + "title": "Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false\n+optional" + }, + "tty": { + "type": "boolean", + "format": "boolean", + "title": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false.\n+optional" + } + }, + "description": "A single application container that you want to run within a pod." + }, + "v1ContainerPort": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services.\n+optional" + }, + "hostPort": { + "type": "integer", + "format": "int32", + "title": "Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 \u003c x \u003c 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this.\n+optional" + }, + "containerPort": { + "type": "integer", + "format": "int32", + "description": "Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 \u003c x \u003c 65536." + }, + "protocol": { + "type": "string", + "title": "Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to \"TCP\".\n+optional" + }, + "hostIP": { + "type": "string", + "title": "What host IP to bind the external port to.\n+optional" + } + }, + "description": "ContainerPort represents a network port in a single container." 
+ }, + "v1CreateOptions": { + "type": "object", + "properties": { + "dryRun": { + "type": "array", + "items": { + "type": "string" + }, + "title": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional" + }, + "fieldManager": { + "type": "string", + "title": "fieldManager is a name associated with the actor or entity\nthat is making these changes. The value must be less than or\n128 characters long, and only contain printable characters,\nas defined by https://golang.org/pkg/unicode/#IsPrint.\n+optional" + } + }, + "description": "CreateOptions may be provided when creating an API object." + }, + "v1DeleteOptions": { + "type": "object", + "properties": { + "gracePeriodSeconds": { + "type": "string", + "format": "int64", + "title": "The duration in seconds before the object should be deleted. Value must be non-negative integer.\nThe value zero indicates delete immediately. If this value is nil, the default grace period for the\nspecified type will be used.\nDefaults to a per object value if not specified. zero means delete immediately.\n+optional" + }, + "preconditions": { + "$ref": "#/definitions/apismetav1Preconditions", + "title": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be\nreturned.\n+optional" + }, + "orphanDependents": { + "type": "boolean", + "format": "boolean", + "title": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.\nShould the dependent objects be orphaned. If true/false, the \"orphan\"\nfinalizer will be added to/removed from the object's finalizers list.\nEither this field or PropagationPolicy may be set, but not both.\n+optional" + }, + "propagationPolicy": { + "type": "string", + "title": "Whether and how garbage collection will be performed.\nEither this field or OrphanDependents may be set, but not both.\nThe default policy is decided by the existing finalizer set in the\nmetadata.finalizers and the resource-specific default policy.\nAcceptable values are: 'Orphan' - orphan the dependents; 'Background' -\nallow the garbage collector to delete the dependents in the background;\n'Foreground' - a cascading policy that deletes all dependents in the\nforeground.\n+optional" + }, + "dryRun": { + "type": "array", + "items": { + "type": "string" + }, + "title": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional" + } + }, + "description": "DeleteOptions may be provided when deleting an API object." + }, + "v1DownwardAPIProjection": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1DownwardAPIVolumeFile" + }, + "title": "Items is a list of DownwardAPIVolume file\n+optional" + } + }, + "description": "Represents downward API info for projecting into a projected volume.\nNote that this is identical to a downwardAPI volume source without the default\nmode." + }, + "v1DownwardAPIVolumeFile": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. 
The first item of the relative path must not start with '..'" + }, + "fieldRef": { + "$ref": "#/definitions/v1ObjectFieldSelector", + "title": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.\n+optional" + }, + "resourceFieldRef": { + "$ref": "#/definitions/v1ResourceFieldSelector", + "title": "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.\n+optional" + }, + "mode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on this file, must be a value between 0\nand 0777. If not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + } + }, + "title": "DownwardAPIVolumeFile represents information to create the file containing the pod field" + }, + "v1DownwardAPIVolumeSource": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1DownwardAPIVolumeFile" + }, + "title": "Items is a list of downward API volume file\n+optional" + }, + "defaultMode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on created files by default. Must be a\nvalue between 0 and 0777. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + } + }, + "description": "DownwardAPIVolumeSource represents a volume containing downward API info.\nDownward API volumes support ownership management and SELinux relabeling." + }, + "v1EmptyDirVolumeSource": { + "type": "object", + "properties": { + "medium": { + "type": "string", + "title": "What type of storage medium should back this directory.\nThe default is \"\" which means to use the node's default medium.\nMust be an empty string (default) or Memory.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\n+optional" + }, + "sizeLimit": { + "$ref": "#/definitions/resourceQuantity", + "title": "Total amount of local storage required for this EmptyDir volume.\nThe size limit is also applicable for memory medium.\nThe maximum usage on memory medium EmptyDir would be the minimum value between\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\nThe default is nil which means that the limit is undefined.\nMore info: http://kubernetes.io/docs/user-guide/volumes#emptydir\n+optional" + } + }, + "description": "Represents an empty directory for a pod.\nEmpty directory volumes support ownership management and SELinux relabeling." + }, + "v1EnvFromSource": { + "type": "object", + "properties": { + "prefix": { + "type": "string", + "title": "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.\n+optional" + }, + "configMapRef": { + "$ref": "#/definitions/v1ConfigMapEnvSource", + "title": "The ConfigMap to select from\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1SecretEnvSource", + "title": "The Secret to select from\n+optional" + } + }, + "title": "EnvFromSource represents the source of a set of ConfigMaps" + }, + "v1EnvVar": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name of the environment variable. Must be a C_IDENTIFIER." 
+ }, + "value": { + "type": "string", + "title": "Variable references $(VAR_NAME) are expanded\nusing the previous defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. The $(VAR_NAME)\nsyntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped\nreferences will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to \"\".\n+optional" + }, + "valueFrom": { + "$ref": "#/definitions/v1EnvVarSource", + "title": "Source for the environment variable's value. Cannot be used if value is not empty.\n+optional" + } + }, + "description": "EnvVar represents an environment variable present in a Container." + }, + "v1EnvVarSource": { + "type": "object", + "properties": { + "fieldRef": { + "$ref": "#/definitions/v1ObjectFieldSelector", + "title": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.\n+optional" + }, + "resourceFieldRef": { + "$ref": "#/definitions/v1ResourceFieldSelector", + "title": "Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.\n+optional" + }, + "configMapKeyRef": { + "$ref": "#/definitions/v1ConfigMapKeySelector", + "title": "Selects a key of a ConfigMap.\n+optional" + }, + "secretKeyRef": { + "$ref": "#/definitions/v1SecretKeySelector", + "title": "Selects a key of a secret in the pod's namespace\n+optional" + } + }, + "description": "EnvVarSource represents a source for the value of an EnvVar." + }, + "v1ExecAction": { + "type": "object", + "properties": { + "command": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.\n+optional" + } + }, + "description": "ExecAction describes a \"run in container\" action." + }, + "v1FCVolumeSource": { + "type": "object", + "properties": { + "targetWWNs": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Optional: FC target worldwide names (WWNs)\n+optional" + }, + "lun": { + "type": "integer", + "format": "int32", + "title": "Optional: FC target lun number\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Optional: Defaults to false (read/write). 
ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + }, + "wwids": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously.\n+optional" + } + }, + "description": "Represents a Fibre Channel volume.\nFibre Channel volumes can only be mounted as read/write once.\nFibre Channel volumes support ownership management and SELinux relabeling." + }, + "v1FieldsV1": { + "type": "object", + "properties": { + "Raw": { + "type": "string", + "format": "byte", + "description": "Raw is the underlying serialization of this object." + } + }, + "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set,\nor a string representing a sub-field or item. The string will follow one of these four formats:\n'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map\n'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item\n'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list\n'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values\nIf a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff" + }, + "v1FlexVolumeSource": { + "type": "object", + "properties": { + "driver": { + "type": "string", + "description": "Driver is the name of the driver to use for this volume." + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "Optional: SecretRef is reference to the secret object containing\nsensitive information to pass to the plugin scripts. This may be\nempty if no secret object is specified. If the secret object\ncontains more than one secret, all secrets are passed to the plugin\nscripts.\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + }, + "options": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Optional: Extra command options if any.\n+optional" + } + }, + "description": "FlexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin." + }, + "v1FlockerVolumeSource": { + "type": "object", + "properties": { + "datasetName": { + "type": "string", + "title": "Name of the dataset stored as metadata -\u003e name on the dataset for Flocker\nshould be considered as deprecated\n+optional" + }, + "datasetUUID": { + "type": "string", + "title": "UUID of the dataset. This is unique identifier of a Flocker dataset\n+optional" + } + }, + "description": "Represents a Flocker volume mounted by the Flocker agent.\nOne and only one of datasetName and datasetUUID should be set.\nFlocker volumes do not support ownership management or SELinux relabeling." 
+ }, + "v1GCEPersistentDiskVolumeSource": { + "type": "object", + "properties": { + "pdName": { + "type": "string", + "title": "Unique name of the PD resource in GCE. Used to identify the disk in GCE.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + }, + "fsType": { + "type": "string", + "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "partition": { + "type": "integer", + "format": "int32", + "title": "The partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as \"1\".\nSimilarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" + } + }, + "description": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must\nalso be in the same GCE project and zone as the kubelet. A GCE PD\ncan only be mounted as read/write once or read-only many times. GCE\nPDs support ownership management and SELinux relabeling." + }, + "v1GetOptions": { + "type": "object", + "properties": { + "resourceVersion": { + "type": "string", + "description": "When specified:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv." + } + }, + "description": "GetOptions is the standard query options to the standard REST get call." + }, + "v1GitRepoVolumeSource": { + "type": "object", + "properties": { + "repository": { + "type": "string", + "title": "Repository URL" + }, + "revision": { + "type": "string", + "title": "Commit hash for the specified revision.\n+optional" + }, + "directory": { + "type": "string", + "title": "Target directory name.\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\ngit repository. Otherwise, if specified, the volume will contain the git repository in\nthe subdirectory with the given name.\n+optional" + } + }, + "description": "Represents a volume that is populated with the contents of a git repository.\nGit repo volumes do not support ownership management.\nGit repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container." 
+ }, + "v1GlusterfsVolumeSource": { + "type": "object", + "properties": { + "endpoints": { + "type": "string", + "title": "EndpointsName is the endpoint name that details Glusterfs topology.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + }, + "path": { + "type": "string", + "title": "Path is the Glusterfs volume path.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod\n+optional" + } + }, + "description": "Represents a Glusterfs mount that lasts the lifetime of a pod.\nGlusterfs volumes do not support ownership management or SELinux relabeling." + }, + "v1HTTPGetAction": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Path to access on the HTTP server.\n+optional" + }, + "port": { + "$ref": "#/definitions/intstrIntOrString", + "description": "Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." + }, + "host": { + "type": "string", + "title": "Host name to connect to, defaults to the pod IP. You probably want to set\n\"Host\" in httpHeaders instead.\n+optional" + }, + "scheme": { + "type": "string", + "title": "Scheme to use for connecting to the host.\nDefaults to HTTP.\n+optional" + }, + "httpHeaders": { + "type": "array", + "items": { + "$ref": "#/definitions/v1HTTPHeader" + }, + "title": "Custom headers to set in the request. HTTP allows repeated headers.\n+optional" + } + }, + "description": "HTTPGetAction describes an action based on HTTP Get requests." + }, + "v1HTTPHeader": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "The header field name" + }, + "value": { + "type": "string", + "title": "The header field value" + } + }, + "title": "HTTPHeader describes a custom header to be used in HTTP probes" + }, + "v1Handler": { + "type": "object", + "properties": { + "exec": { + "$ref": "#/definitions/v1ExecAction", + "title": "One and only one of the following should be specified.\nExec specifies the action to take.\n+optional" + }, + "httpGet": { + "$ref": "#/definitions/v1HTTPGetAction", + "title": "HTTPGet specifies the http request to perform.\n+optional" + }, + "tcpSocket": { + "$ref": "#/definitions/v1TCPSocketAction", + "title": "TCPSocket specifies an action involving a TCP port.\nTCP hooks not yet supported\nTODO: implement a realistic TCP lifecycle hook\n+optional" + } + }, + "description": "Handler defines a specific action that should be taken\nTODO: pass structured data to these actions, and document that data here." + }, + "v1HostAlias": { + "type": "object", + "properties": { + "ip": { + "type": "string", + "description": "IP address of the host file entry." + }, + "hostnames": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Hostnames for the above IP address." + } + }, + "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the\npod's hosts file." 
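The v1HTTPGetAction/v1HTTPHeader/v1Handler trio above is what the v1Probe definition later in this file embeds. A hedged sketch, again against the vendored corev1 types (path, port, and header values are illustrative), of an HTTP health-check handler wired into a probe; note the intstrIntOrString port, which accepts either a number or an IANA_SVC_NAME:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        // Handler is embedded in Probe, exactly as the swagger models it.
        probe := corev1.Probe{
            Handler: corev1.Handler{
                HTTPGet: &corev1.HTTPGetAction{
                    Path:   "/healthz",
                    Port:   intstr.FromInt(8080),
                    Scheme: corev1.URISchemeHTTP,
                    HTTPHeaders: []corev1.HTTPHeader{
                        {Name: "X-Probe", Value: "liveness"},
                    },
                },
            },
            InitialDelaySeconds: 5,
            PeriodSeconds:       10,
        }
        fmt.Printf("%+v\n", probe)
    }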
+ }, + "v1HostPathVolumeSource": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Path of the directory on the host.\nIf the path is a symlink, it will follow the link to the real path.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + }, + "type": { + "type": "string", + "title": "Type for HostPath Volume\nDefaults to \"\"\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n+optional" + } + }, + "description": "Represents a host path mapped into a pod.\nHost path volumes do not support ownership management or SELinux relabeling." + }, + "v1ISCSIVolumeSource": { + "type": "object", + "properties": { + "targetPortal": { + "type": "string", + "description": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260)." + }, + "iqn": { + "type": "string", + "description": "Target iSCSI Qualified Name." + }, + "lun": { + "type": "integer", + "format": "int32", + "description": "iSCSI Target Lun number." + }, + "iscsiInterface": { + "type": "string", + "title": "iSCSI Interface Name that uses an iSCSI transport.\nDefaults to 'default' (tcp).\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\n+optional" + }, + "portals": { + "type": "array", + "items": { + "type": "string" + }, + "title": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260).\n+optional" + }, + "chapAuthDiscovery": { + "type": "boolean", + "format": "boolean", + "title": "whether to support iSCSI Discovery CHAP authentication\n+optional" + }, + "chapAuthSession": { + "type": "boolean", + "format": "boolean", + "title": "whether to support iSCSI Session CHAP authentication\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "CHAP Secret for iSCSI target and initiator authentication\n+optional" + }, + "initiatorName": { + "type": "string", + "title": "Custom iSCSI Initiator Name.\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\n\u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.\n+optional" + } + }, + "description": "Represents an ISCSI disk.\nISCSI volumes can only be mounted as read/write once.\nISCSI volumes support ownership management and SELinux relabeling." + }, + "v1KeyToPath": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "The key to project." + }, + "path": { + "type": "string", + "description": "The relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'." + }, + "mode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on this file, must be a value between 0\nand 0777. 
If not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + } + }, + "description": "Maps a string key to a path within a volume." + }, + "v1LabelSelector": { + "type": "object", + "properties": { + "matchLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed.\n+optional" + }, + "matchExpressions": { + "type": "array", + "items": { + "$ref": "#/definitions/v1LabelSelectorRequirement" + }, + "title": "matchExpressions is a list of label selector requirements. The requirements are ANDed.\n+optional" + } + }, + "description": "A label selector is a label query over a set of resources. The result of matchLabels and\nmatchExpressions are ANDed. An empty label selector matches all objects. A null\nlabel selector matches no objects." + }, + "v1LabelSelectorRequirement": { + "type": "object", + "properties": { + "key": { + "type": "string", + "title": "key is the label key that the selector applies to.\n+patchMergeKey=key\n+patchStrategy=merge" + }, + "operator": { + "type": "string", + "description": "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist." + }, + "values": { + "type": "array", + "items": { + "type": "string" + }, + "title": "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch.\n+optional" + } + }, + "description": "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values." + }, + "v1Lifecycle": { + "type": "object", + "properties": { + "postStart": { + "$ref": "#/definitions/v1Handler", + "title": "PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\n+optional" + }, + "preStop": { + "$ref": "#/definitions/v1Handler", + "title": "PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The reason for termination is passed to the\nhandler. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod. Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks\n+optional" + } + }, + "description": "Lifecycle describes actions that the management system should take in response to container lifecycle\nevents. 
For the PostStart and PreStop lifecycle handlers, management of the container blocks\nuntil the action is complete, unless the container process fails, in which case the handler is aborted." + }, + "v1ListMeta": { + "type": "object", + "properties": { + "selfLink": { + "type": "string", + "description": "selfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n\nDEPRECATED\nKubernetes will stop propagating this field in 1.20 release and the field is planned\nto be removed in 1.21 release.\n+optional" + }, + "resourceVersion": { + "type": "string", + "title": "String that identifies the server's internal version of this object that\ncan be used by clients to determine when objects have changed.\nValue must be treated as opaque by clients and passed unmodified back to the server.\nPopulated by the system.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional" + }, + "continue": { + "type": "string", + "description": "continue may be set if the user set a limit on the number of items returned, and indicates that\nthe server has more data available. The value is opaque and may be used to issue another request\nto the endpoint that served this list to retrieve the next set of available objects. Continuing a\nconsistent list may not be possible if the server configuration has changed or more than a few\nminutes have passed. The resourceVersion field returned when using this continue value will be\nidentical to the value in the first response, unless you have received this token from an error\nmessage." + }, + "remainingItemCount": { + "type": "string", + "format": "int64", + "title": "remainingItemCount is the number of subsequent items in the list which are not included in this\nlist response. If the list request contained label or field selectors, then the number of\nremaining items is unknown and the field will be left unset and omitted during serialization.\nIf the list is complete (either because it is not chunking or because this is the last chunk),\nthen there are no more remaining items and this field will be left unset and omitted during\nserialization.\nServers older than v1.15 do not set this field.\nThe intended use of the remainingItemCount is *estimating* the size of a collection. Clients\nshould not rely on the remainingItemCount to be set or to be exact.\n+optional" + } + }, + "description": "ListMeta describes metadata that synthetic resources must have, including lists and\nvarious status objects. A resource may have only one of {ObjectMeta, ListMeta}." + }, + "v1ListOptions": { + "type": "object", + "properties": { + "labelSelector": { + "type": "string", + "title": "A selector to restrict the list of returned objects by their labels.\nDefaults to everything.\n+optional" + }, + "fieldSelector": { + "type": "string", + "title": "A selector to restrict the list of returned objects by their fields.\nDefaults to everything.\n+optional" + }, + "watch": { + "type": "boolean", + "format": "boolean", + "title": "Watch for changes to the described resources and return them as a stream of\nadd, update, and remove notifications. Specify resourceVersion.\n+optional" + }, + "allowWatchBookmarks": { + "type": "boolean", + "format": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\".\nServers that do not implement bookmarks may ignore this flag and\nbookmarks are sent at the server's discretion. 
Clients should not\nassume bookmarks are returned at any specific interval, nor may they\nassume the server will send any BOOKMARK event during a session.\nIf this is not a watch, this field is ignored.\nIf the feature gate WatchBookmarks is not enabled in apiserver,\nthis field is ignored.\n\nThis field is beta.\n\n+optional" + }, + "resourceVersion": { + "type": "string", + "title": "When specified with a watch call, shows changes that occur after that particular version of a resource.\nDefaults to changes from the beginning of history.\nWhen specified for list:\n- if unset, then the result is returned from remote storage based on quorum-read flag;\n- if it's 0, then we simply return what we currently have in cache, no guarantee;\n- if set to non zero, then the result is at least as fresh as given rv.\n+optional" + }, + "timeoutSeconds": { + "type": "string", + "format": "int64", + "title": "Timeout for the list/watch call.\nThis limits the duration of the call, regardless of any activity or inactivity.\n+optional" + }, + "limit": { + "type": "string", + "format": "int64", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the\nserver will set the `continue` field on the list metadata to a value that can be used with the\nsame initial query to retrieve the next set of results. Setting a limit may return fewer than\nthe requested number of items (up to zero items) in the event all requested objects are\nfiltered out and clients should only use the presence of the continue field to determine whether\nmore results are available. Servers may choose not to support the limit argument and will return\nall of the available results. If limit is specified and the continue field is empty, clients may\nassume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing\na single list call without a limit - that is, no objects created, modified, or deleted after the\nfirst request is issued will be included in any subsequent continued requests. This is sometimes\nreferred to as a consistent snapshot, and ensures that a client that is using limit to receive\nsmaller chunks of a very large result can ensure they see all possible objects. If objects are\nupdated during a chunked list the version of the object that was present at the time the first list\nresult was calculated is returned." + }, + "continue": { + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. Since this value is\nserver defined, clients may only use the continue value from a previous query result with identical\nquery parameters (except for the value of continue) and the server may reject a continue value it\ndoes not recognize. If the specified continue value is no longer valid whether due to expiration\n(generally five to fifteen minutes) or a configuration change on the server, the server will\nrespond with a 410 ResourceExpired error together with a continue token. If the client needs a\nconsistent list, it must restart its list without the continue field. 
Otherwise, the client may\nsend another list request with the token received with the 410 error, the server will respond with\na list starting from the next key, but from the latest snapshot, which is inconsistent with the\nprevious list results - objects that are created, modified, or deleted after the first list request\nwill be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last\nresourceVersion value returned by the server and not miss any modifications." + } + }, + "description": "ListOptions is the query options to a standard REST list call." + }, + "v1LocalObjectReference": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?\n+optional" + } + }, + "description": "LocalObjectReference contains enough information to let you locate the\nreferenced object inside the same namespace." + }, + "v1ManagedFieldsEntry": { + "type": "object", + "properties": { + "manager": { + "type": "string", + "description": "Manager is an identifier of the workflow managing these fields." + }, + "operation": { + "type": "string", + "description": "Operation is the type of operation which led to this ManagedFieldsEntry being created.\nThe only valid values for this field are 'Apply' and 'Update'." + }, + "apiVersion": { + "type": "string", + "description": "APIVersion defines the version of this resource that this field set\napplies to. The format is \"group/version\" just like the top-level\nAPIVersion field. It is necessary to track the version of a field\nset because it cannot be automatically converted." + }, + "time": { + "$ref": "#/definitions/v1Time", + "title": "Time is the timestamp of when these fields were set. It should always be empty if Operation is 'Apply'\n+optional" + }, + "fieldsType": { + "type": "string", + "title": "FieldsType is the discriminator for the different fields format and version.\nThere is currently only one possible value: \"FieldsV1\"" + }, + "fieldsV1": { + "$ref": "#/definitions/v1FieldsV1", + "title": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.\n+optional" + } + }, + "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource\nthat the fieldset applies to." + }, + "v1NFSVolumeSource": { + "type": "object", + "properties": { + "server": { + "type": "string", + "title": "Server is the hostname or IP address of the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + }, + "path": { + "type": "string", + "title": "Path that is exported by the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force\nthe NFS export to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional" + } + }, + "description": "Represents an NFS mount that lasts the lifetime of a pod.\nNFS volumes do not support ownership management or SELinux relabeling." 
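The limit/continue protocol spelled out in the v1ListOptions description above reduces to a small loop in client code. An illustrative sketch, assuming a client-go Clientset whose List signature matches the version vendored at the time (the exact signature, e.g. whether it takes a context, varies across client-go releases):

    package example

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // listAllPods pages through pods 100 at a time, passing the opaque
    // continue token back with otherwise identical query parameters.
    func listAllPods(clientset kubernetes.Interface, namespace string) error {
        opts := metav1.ListOptions{Limit: 100}
        for {
            list, err := clientset.CoreV1().Pods(namespace).List(opts)
            if err != nil {
                return err
            }
            fmt.Printf("got %d pods\n", len(list.Items))
            if list.Continue == "" {
                return nil // empty token: the server reports the list is complete
            }
            opts.Continue = list.Continue
        }
    }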
+ }, + "v1NodeAffinity": { + "type": "object", + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "$ref": "#/definitions/v1NodeSelector", + "title": "If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to an update), the system\nmay or may not try to eventually evict the pod from its node.\n+optional" + }, + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/v1PreferredSchedulingTerm" + }, + "title": "The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node matches the corresponding matchExpressions; the\nnode(s) with the highest sum are the most preferred.\n+optional" + } + }, + "description": "Node affinity is a group of node affinity scheduling rules." + }, + "v1NodeSelector": { + "type": "object", + "properties": { + "nodeSelectorTerms": { + "type": "array", + "items": { + "$ref": "#/definitions/v1NodeSelectorTerm" + }, + "description": "Required. A list of node selector terms. The terms are ORed." + } + }, + "description": "A node selector represents the union of the results of one or more label queries\nover a set of nodes; that is, it represents the OR of the selectors represented\nby the node selector terms." + }, + "v1NodeSelectorRequirement": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "The label key that the selector applies to." + }, + "operator": { + "type": "string", + "description": "Represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt." + }, + "values": { + "type": "array", + "items": { + "type": "string" + }, + "title": "An array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. If the operator is Gt or Lt, the values\narray must have a single element, which will be interpreted as an integer.\nThis array is replaced during a strategic merge patch.\n+optional" + } + }, + "description": "A node selector requirement is a selector that contains values, a key, and an operator\nthat relates the key and values." + }, + "v1NodeSelectorTerm": { + "type": "object", + "properties": { + "matchExpressions": { + "type": "array", + "items": { + "$ref": "#/definitions/v1NodeSelectorRequirement" + }, + "title": "A list of node selector requirements by node's labels.\n+optional" + }, + "matchFields": { + "type": "array", + "items": { + "$ref": "#/definitions/v1NodeSelectorRequirement" + }, + "title": "A list of node selector requirements by node's fields.\n+optional" + } + }, + "description": "A null or empty node selector term matches no objects. Its requirements\nare ANDed.\nThe TopologySelectorTerm type implements a subset of the NodeSelectorTerm." 
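To make the OR/AND semantics of v1NodeSelector concrete: terms in nodeSelectorTerms are ORed against each other, while the requirements inside a single term are ANDed. A small sketch against the vendored corev1 types (the label key and value are only examples) of a hard node-affinity rule:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        // requiredDuringSchedulingIgnoredDuringExecution: a hard constraint
        // at scheduling time; one term, one ANDed requirement inside it.
        affinity := corev1.NodeAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
                NodeSelectorTerms: []corev1.NodeSelectorTerm{{
                    MatchExpressions: []corev1.NodeSelectorRequirement{{
                        Key:      "kubernetes.io/os",
                        Operator: corev1.NodeSelectorOpIn,
                        Values:   []string{"linux"},
                    }},
                }},
            },
        }
        fmt.Printf("%+v\n", affinity)
    }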
+ }, + "v1ObjectFieldSelector": { + "type": "object", + "properties": { + "apiVersion": { + "type": "string", + "title": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".\n+optional" + }, + "fieldPath": { + "type": "string", + "description": "Path of the field to select in the specified API version." + } + }, + "description": "ObjectFieldSelector selects an APIVersioned field of an object." + }, + "v1ObjectMeta": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name must be unique within a namespace. It is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names\n+optional" + }, + "generateName": { + "type": "string", + "description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will\nNOT return a 409 - instead, it will either return 201 Created or 500 with Reason\nServerTimeout indicating a unique name could not be found in the time allotted, and the client\nshould retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n+optional" + }, + "namespace": { + "type": "string", + "description": "Namespace defines the space within which each name must be unique. An empty namespace is\nequivalent to the \"default\" namespace, but \"default\" is the canonical representation.\nNot all objects are required to be scoped to a namespace - the value of this field for\nthose objects will be empty.\n\nMust be a DNS_LABEL.\nCannot be updated.\nMore info: http://kubernetes.io/docs/user-guide/namespaces\n+optional" + }, + "selfLink": { + "type": "string", + "description": "SelfLink is a URL representing this object.\nPopulated by the system.\nRead-only.\n\nDEPRECATED\nKubernetes will stop propagating this field in 1.20 release and the field is planned\nto be removed in 1.21 release.\n+optional" + }, + "uid": { + "type": "string", + "description": "UID is the unique in time and space value for this object. It is typically generated by\nthe server on successful creation of a resource and is not allowed to change on PUT\noperations.\n\nPopulated by the system.\nRead-only.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids\n+optional" + }, + "resourceVersion": { + "type": "string", + "description": "An opaque value that represents the internal version of this object that can\nbe used by clients to determine when objects have changed. 
May be used for optimistic\nconcurrency, change detection, and the watch operation on a resource or set of resources.\nClients must treat these values as opaque and pass them unmodified back to the server.\nThey may only be valid for a particular resource or set of resources.\n\nPopulated by the system.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional" + }, + "generation": { + "type": "string", + "format": "int64", + "title": "A sequence number representing a specific generation of the desired state.\nPopulated by the system. Read-only.\n+optional" + }, + "creationTimestamp": { + "$ref": "#/definitions/v1Time", + "description": "CreationTimestamp is a timestamp representing the server time when this object was\ncreated. It is not guaranteed to be set in happens-before order across separate operations.\nClients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system.\nRead-only.\nNull for lists.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n+optional" + }, + "deletionTimestamp": { + "$ref": "#/definitions/v1Time", + "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This\nfield is set by the server when a graceful deletion is requested by the user, and is not\ndirectly settable by a client. The resource is expected to be deleted (no longer visible\nfrom resource lists, and not reachable by name) after the time in this field, once the\nfinalizers list is empty. As long as the finalizers list contains items, deletion is blocked.\nOnce the deletionTimestamp is set, this value may not be unset or be set further into the\nfuture, although it may be shortened or the resource may be deleted prior to this time.\nFor example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react\nby sending a graceful termination signal to the containers in the pod. After that 30 seconds,\nthe Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,\nremove the pod from the API. In the presence of network partitions, this object may still\nexist after this timestamp, until an administrator or automated process can determine the\nresource is fully terminated.\nIf not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n+optional" + }, + "deletionGracePeriodSeconds": { + "type": "string", + "format": "int64", + "title": "Number of seconds allowed for this object to gracefully terminate before\nit will be removed from the system. Only set when deletionTimestamp is also set.\nMay only be shortened.\nRead-only.\n+optional" + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. 
May match selectors of replication controllers\nand services.\nMore info: http://kubernetes.io/docs/user-guide/labels\n+optional" + }, + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: http://kubernetes.io/docs/user-guide/annotations\n+optional" + }, + "ownerReferences": { + "type": "array", + "items": { + "$ref": "#/definitions/v1OwnerReference" + }, + "title": "List of objects depended on by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n+optional\n+patchMergeKey=uid\n+patchStrategy=merge" + }, + "finalizers": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Must be empty before the object is deleted from the registry. Each entry\nis an identifier for the responsible component that will remove the entry\nfrom the list. If the deletionTimestamp of the object is non-nil, entries\nin this list can only be removed.\n+optional\n+patchStrategy=merge" + }, + "clusterName": { + "type": "string", + "title": "The name of the cluster which the object belongs to.\nThis is used to distinguish resources with same name and namespace in different clusters.\nThis field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.\n+optional" + }, + "managedFields": { + "type": "array", + "items": { + "$ref": "#/definitions/v1ManagedFieldsEntry" + }, + "description": "ManagedFields maps workflow-id and version to the set of fields\nthat are managed by that workflow. This is mostly for internal\nhousekeeping, and users typically shouldn't need to set or\nunderstand this field. A workflow can be the user's name, a\ncontroller's name, or the name of a specific apply path like\n\"ci-cd\". The set of fields is always in the version that the\nworkflow used when modifying the object.\n\n+optional" + } + }, + "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects\nusers must create." + }, + "v1OwnerReference": { + "type": "object", + "properties": { + "apiVersion": { + "type": "string", + "description": "API version of the referent." 
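Since ownerReferences drive garbage collection as described above, here is an illustrative metav1.OwnerReference roughly as a controller like this project's would set it on a child object; the name and UID are hypothetical placeholders:

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
    )

    func main() {
        isController := true
        // Once every owner in metadata.ownerReferences has been deleted,
        // the dependent object is garbage collected; blockOwnerDeletion
        // makes foreground deletion wait for this dependent.
        ref := metav1.OwnerReference{
            APIVersion:         "argoproj.io/v1alpha1",
            Kind:               "Workflow",
            Name:               "my-workflow",
            UID:                types.UID("0a1b2c3d-example-uid"),
            Controller:         &isController,
            BlockOwnerDeletion: &isController,
        }
        fmt.Printf("%+v\n", ref)
    }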
+ }, + "kind": { + "type": "string", + "title": "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + }, + "name": { + "type": "string", + "title": "Name of the referent.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#names" + }, + "uid": { + "type": "string", + "title": "UID of the referent.\nMore info: http://kubernetes.io/docs/user-guide/identifiers#uids" + }, + "controller": { + "type": "boolean", + "format": "boolean", + "title": "If true, this reference points to the managing controller.\n+optional" + }, + "blockOwnerDeletion": { + "type": "boolean", + "format": "boolean", + "title": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then\nthe owner cannot be deleted from the key-value store until this\nreference is removed.\nDefaults to false.\nTo set this field, a user needs \"delete\" permission of the owner,\notherwise 422 (Unprocessable Entity) will be returned.\n+optional" + } + }, + "description": "OwnerReference contains enough information to let you identify an owning\nobject. An owning object must be in the same namespace as the dependent, or\nbe cluster-scoped, so there is no namespace field." + }, + "v1PersistentVolumeClaimVolumeSource": { + "type": "object", + "properties": { + "claimName": { + "type": "string", + "title": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Will force the ReadOnly setting in VolumeMounts.\nDefault false.\n+optional" + } + }, + "description": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.\nThis volume finds the bound PV and mounts that volume for the pod. A\nPersistentVolumeClaimVolumeSource is, essentially, a wrapper around another\ntype of volume that is owned by someone else (the system)." + }, + "v1PhotonPersistentDiskVolumeSource": { + "type": "object", + "properties": { + "pdID": { + "type": "string", + "title": "ID that identifies Photon Controller persistent disk" + }, + "fsType": { + "type": "string", + "description": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified." + } + }, + "description": "Represents a Photon Controller persistent disk resource." + }, + "v1PodAffinity": { + "type": "object", + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/v1PodAffinityTerm" + }, + "title": "If the affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. 
all terms must be satisfied.\n+optional" + }, + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/v1WeightedPodAffinityTerm" + }, + "title": "The scheduler will prefer to schedule pods to nodes that satisfy\nthe affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which match the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred.\n+optional" + } + }, + "description": "Pod affinity is a group of inter pod affinity scheduling rules." + }, + "v1PodAffinityTerm": { + "type": "object", + "properties": { + "labelSelector": { + "$ref": "#/definitions/v1LabelSelector", + "title": "A label query over a set of resources, in this case pods.\n+optional" + }, + "namespaces": { + "type": "array", + "items": { + "type": "string" + }, + "title": "namespaces specifies which namespaces the labelSelector applies to (matches against);\nnull or empty list means \"this pod's namespace\"\n+optional" + }, + "topologyKey": { + "type": "string", + "description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching\nthe labelSelector in the specified namespaces, where co-located is defined as running on a node\nwhose value of the label with key topologyKey matches that of any node on which any of the\nselected pods is running.\nEmpty topologyKey is not allowed." + } + }, + "title": "Defines a set of pods (namely those matching the labelSelector\nrelative to the given namespace(s)) that this pod should be\nco-located (affinity) or not co-located (anti-affinity) with,\nwhere co-located is defined as running on a node whose value of\nthe label with key \u003ctopologyKey\u003e matches that of any node on which\na pod of the set of pods is running" + }, + "v1PodAntiAffinity": { + "type": "object", + "properties": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/v1PodAffinityTerm" + }, + "title": "If the anti-affinity requirements specified by this field are not met at\nscheduling time, the pod will not be scheduled onto the node.\nIf the anti-affinity requirements specified by this field cease to be met\nat some point during pod execution (e.g. due to a pod label update), the\nsystem may or may not try to eventually evict the pod from its node.\nWhen there are multiple elements, the lists of nodes corresponding to each\npodAffinityTerm are intersected, i.e. all terms must be satisfied.\n+optional" + }, + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "items": { + "$ref": "#/definitions/v1WeightedPodAffinityTerm" + }, + "title": "The scheduler will prefer to schedule pods to nodes that satisfy\nthe anti-affinity expressions specified by this field, but it may choose\na node that violates one or more of the expressions. 
The node that is\nmost preferred is the one with the greatest sum of weights, i.e.\nfor each node that meets all of the scheduling requirements (resource\nrequest, requiredDuringScheduling anti-affinity expressions, etc.),\ncompute a sum by iterating through the elements of this field and adding\n\"weight\" to the sum if the node has pods which match the corresponding podAffinityTerm; the\nnode(s) with the highest sum are the most preferred.\n+optional" + } + }, + "description": "Pod anti affinity is a group of inter pod anti affinity scheduling rules." + }, + "v1PodSecurityContext": { + "type": "object", + "properties": { + "seLinuxOptions": { + "$ref": "#/definitions/v1SELinuxOptions", + "title": "The SELinux context to be applied to all containers.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in SecurityContext. If set in\nboth SecurityContext and PodSecurityContext, the value specified in SecurityContext\ntakes precedence for that container.\n+optional" + }, + "windowsOptions": { + "$ref": "#/definitions/v1WindowsSecurityContextOptions", + "title": "The Windows specific settings applied to all containers.\nIf unspecified, the options within a container's SecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "runAsUser": { + "type": "string", + "format": "int64", + "title": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\n+optional" + }, + "runAsGroup": { + "type": "string", + "format": "int64", + "title": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence\nfor that container.\n+optional" + }, + "runAsNonRoot": { + "type": "boolean", + "format": "boolean", + "title": "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in SecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "supplementalGroups": { + "type": "array", + "items": { + "type": "string", + "format": "int64" + }, + "title": "A list of groups applied to the first process run in each container, in addition\nto the container's primary GID. If unspecified, no groups will be added to\nany container.\n+optional" + }, + "fsGroup": { + "type": "string", + "format": "int64", + "description": "1. The owning GID will be the FSGroup\n2. The setgid bit is set (new files created in the volume will be owned by FSGroup)\n3. 
The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.\n+optional", + "title": "A special supplemental group that applies to all containers in a pod.\nSome volume types allow the Kubelet to change the ownership of that volume\nto be owned by the pod:" + }, + "sysctls": { + "type": "array", + "items": { + "$ref": "#/definitions/v1Sysctl" + }, + "title": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported\nsysctls (by the container runtime) might fail to launch.\n+optional" + } + }, + "description": "PodSecurityContext holds pod-level security attributes and common container settings.\nSome fields are also present in container.securityContext. Field values of\ncontainer.securityContext take precedence over field values of PodSecurityContext." + }, + "v1PortworxVolumeSource": { + "type": "object", + "properties": { + "volumeID": { + "type": "string", + "title": "VolumeID uniquely identifies a Portworx volume" + }, + "fsType": { + "type": "string", + "description": "FSType represents the filesystem type to mount\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified." + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + } + }, + "description": "PortworxVolumeSource represents a Portworx volume resource." + }, + "v1PreferredSchedulingTerm": { + "type": "object", + "properties": { + "weight": { + "type": "integer", + "format": "int32", + "description": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100." + }, + "preference": { + "$ref": "#/definitions/v1NodeSelectorTerm", + "description": "A node selector term, associated with the corresponding weight." + } + }, + "description": "An empty preferred scheduling term matches all objects with implicit weight 0\n(i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op)." + }, + "v1Probe": { + "type": "object", + "properties": { + "handler": { + "$ref": "#/definitions/v1Handler", + "title": "The action taken to determine the health of a container" + }, + "initialDelaySeconds": { + "type": "integer", + "format": "int32", + "title": "Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "timeoutSeconds": { + "type": "integer", + "format": "int32", + "title": "Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional" + }, + "periodSeconds": { + "type": "integer", + "format": "int32", + "title": "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.\n+optional" + }, + "successThreshold": { + "type": "integer", + "format": "int32", + "title": "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1.\n+optional" + }, + "failureThreshold": { + "type": "integer", + "format": "int32", + "title": "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. 
Minimum value is 1.\n+optional" + } + }, + "description": "Probe describes a health check to be performed against a container to determine whether it is\nalive or ready to receive traffic." + }, + "v1ProjectedVolumeSource": { + "type": "object", + "properties": { + "sources": { + "type": "array", + "items": { + "$ref": "#/definitions/v1VolumeProjection" + }, + "title": "list of volume projections" + }, + "defaultMode": { + "type": "integer", + "format": "int32", + "title": "Mode bits to use on created files by default. Must be a value between\n0 and 0777.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + } + }, + "title": "Represents a projected volume source" + }, + "v1QuobyteVolumeSource": { + "type": "object", + "properties": { + "registry": { + "type": "string", + "title": "Registry represents a single or multiple Quobyte Registry services\nspecified as a string as host:port pair (multiple entries are separated with commas)\nwhich acts as the central registry for volumes" + }, + "volume": { + "type": "string", + "description": "Volume is a string that references an already created Quobyte volume by name." + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.\nDefaults to false.\n+optional" + }, + "user": { + "type": "string", + "title": "User to map volume access to\nDefaults to serviceaccount user\n+optional" + }, + "group": { + "type": "string", + "title": "Group to map volume access to\nDefault is no group\n+optional" + }, + "tenant": { + "type": "string", + "title": "Tenant owning the given Quobyte volume in the Backend\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin\n+optional" + } + }, + "description": "Represents a Quobyte mount that lasts the lifetime of a pod.\nQuobyte volumes do not support ownership management or SELinux relabeling." + }, + "v1RBDVolumeSource": { + "type": "object", + "properties": { + "monitors": { + "type": "array", + "items": { + "type": "string" + }, + "title": "A collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + }, + "image": { + "type": "string", + "title": "The rados image name.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + }, + "fsType": { + "type": "string", + "title": "Filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\nTODO: how do we prevent errors in the filesystem from compromising the machine\n+optional" + }, + "pool": { + "type": "string", + "title": "The rados pool name.\nDefault is rbd.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + }, + "user": { + "type": "string", + "title": "The rados user name.\nDefault is admin.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + }, + "keyring": { + "type": "string", + "title": "Keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "SecretRef is name of the authentication secret for RBDUser. If provided\noverrides keyring.\nDefault is nil.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "ReadOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it\n+optional" + } + }, + "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod.\nRBD volumes support ownership management and SELinux relabeling." + }, + "v1ResourceFieldSelector": { + "type": "object", + "properties": { + "containerName": { + "type": "string", + "title": "Container name: required for volumes, optional for env vars\n+optional" + }, + "resource": { + "type": "string", + "title": "Required: resource to select" + }, + "divisor": { + "$ref": "#/definitions/resourceQuantity", + "title": "Specifies the output format of the exposed resources, defaults to \"1\"\n+optional" + } + }, + "title": "ResourceFieldSelector represents container resources (cpu, memory) and their output format" + }, + "v1ResourceRequirements": { + "type": "object", + "properties": { + "limits": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/resourceQuantity" + }, + "title": "Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/\n+optional" + }, + "requests": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/resourceQuantity" + }, + "title": "Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/\n+optional" + } + }, + "description": "ResourceRequirements describes the compute resource requirements." 
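A short sketch of the v1ResourceRequirements shape against the vendored corev1 and apimachinery packages, using quantity strings of the kind the referenced resourceQuantity format accepts (the specific values here are only examples):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        // Requests and Limits are maps keyed by resource name; if Requests
        // is omitted for a container it defaults to Limits, per the schema.
        reqs := corev1.ResourceRequirements{
            Requests: corev1.ResourceList{
                corev1.ResourceCPU:    resource.MustParse("250m"),
                corev1.ResourceMemory: resource.MustParse("64Mi"),
            },
            Limits: corev1.ResourceList{
                corev1.ResourceCPU:    resource.MustParse("500m"),
                corev1.ResourceMemory: resource.MustParse("128Mi"),
            },
        }
        fmt.Printf("%+v\n", reqs)
    }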
+ }, + "v1SELinuxOptions": { + "type": "object", + "properties": { + "user": { + "type": "string", + "title": "User is a SELinux user label that applies to the container.\n+optional" + }, + "role": { + "type": "string", + "title": "Role is a SELinux role label that applies to the container.\n+optional" + }, + "type": { + "type": "string", + "title": "Type is a SELinux type label that applies to the container.\n+optional" + }, + "level": { + "type": "string", + "title": "Level is SELinux level label that applies to the container.\n+optional" + } + }, + "title": "SELinuxOptions are the labels to be applied to the container" + }, + "v1ScaleIOVolumeSource": { + "type": "object", + "properties": { + "gateway": { + "type": "string", + "description": "The host address of the ScaleIO API Gateway." + }, + "system": { + "type": "string", + "description": "The name of the storage system as configured in ScaleIO." + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "description": "SecretRef references the secret for ScaleIO user and other\nsensitive information. If this is not provided, Login operation will fail." + }, + "sslEnabled": { + "type": "boolean", + "format": "boolean", + "title": "Flag to enable/disable SSL communication with Gateway, default false\n+optional" + }, + "protectionDomain": { + "type": "string", + "title": "The name of the ScaleIO Protection Domain for the configured storage.\n+optional" + }, + "storagePool": { + "type": "string", + "title": "The ScaleIO Storage Pool associated with the protection domain.\n+optional" + }, + "storageMode": { + "type": "string", + "title": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\nDefault is ThinProvisioned.\n+optional" + }, + "volumeName": { + "type": "string", + "description": "The name of a volume already created in the ScaleIO system\nthat is associated with this volume source." + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\".\nDefault is \"xfs\".\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + } + }, + "title": "ScaleIOVolumeSource represents a persistent ScaleIO volume" + }, + "v1SecretEnvSource": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference", + "description": "The Secret to select from." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the Secret must be defined\n+optional" + } + }, + "description": "SecretEnvSource selects a Secret to populate the environment\nvariables with.\n\nThe contents of the target Secret's Data field will represent the\nkey-value pairs as environment variables." + }, + "v1SecretKeySelector": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference", + "description": "The name of the secret in the pod's namespace to select from." + }, + "key": { + "type": "string", + "description": "The key of the secret to select from. Must be a valid secret key." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the Secret or its key must be defined\n+optional" + } + }, + "description": "SecretKeySelector selects a key of a Secret." 
+ }, + "v1SecretProjection": { + "type": "object", + "properties": { + "localObjectReference": { + "$ref": "#/definitions/v1LocalObjectReference" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1KeyToPath" + }, + "title": "If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the Secret or its key must be defined\n+optional" + } + }, + "description": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a\nprojected volume as files using the keys in the Data field as the file names.\nNote that this is identical to a secret volume source without the default\nmode." + }, + "v1SecretVolumeSource": { + "type": "object", + "properties": { + "secretName": { + "type": "string", + "title": "Name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1KeyToPath" + }, + "title": "If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.\n+optional" + }, + "defaultMode": { + "type": "integer", + "format": "int32", + "title": "Optional: mode bits to use on created files by default. Must be a\nvalue between 0 and 0777. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.\n+optional" + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Specify whether the Secret or its keys must be defined\n+optional" + } + }, + "description": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume\nas files using the keys in the Data field as the file names.\nSecret volumes support ownership management and SELinux relabeling." 
+ }, + "v1SecurityContext": { + "type": "object", + "properties": { + "capabilities": { + "$ref": "#/definitions/v1Capabilities", + "title": "The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\n+optional" + }, + "privileged": { + "type": "boolean", + "format": "boolean", + "title": "Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\n+optional" + }, + "seLinuxOptions": { + "$ref": "#/definitions/v1SELinuxOptions", + "title": "The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "windowsOptions": { + "$ref": "#/definitions/v1WindowsSecurityContextOptions", + "title": "The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "runAsUser": { + "type": "string", + "format": "int64", + "title": "The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "runAsGroup": { + "type": "string", + "format": "int64", + "title": "The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "runAsNonRoot": { + "type": "boolean", + "format": "boolean", + "title": "Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\n+optional" + }, + "readOnlyRootFilesystem": { + "type": "boolean", + "format": "boolean", + "title": "Whether this container has a read-only root filesystem.\nDefault is false.\n+optional" + }, + "allowPrivilegeEscalation": { + "type": "boolean", + "format": "boolean", + "title": "AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\n+optional" + }, + "procMount": { + "type": "string", + "title": "procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\n+optional" + } + }, + "description": "SecurityContext holds security configuration that will be applied to a container.\nSome fields are present in both SecurityContext and PodSecurityContext. 
When both\nare set, the values in SecurityContext take precedence." + }, + "v1ServiceAccountTokenProjection": { + "type": "object", + "properties": { + "audience": { + "type": "string", + "title": "Audience is the intended audience of the token. A recipient of a token\nmust identify itself with an identifier specified in the audience of the\ntoken, and otherwise should reject the token. The audience defaults to the\nidentifier of the apiserver.\n+optional" + }, + "expirationSeconds": { + "type": "string", + "format": "int64", + "title": "ExpirationSeconds is the requested duration of validity of the service\naccount token. As the token approaches expiration, the kubelet volume\nplugin will proactively rotate the service account token. The kubelet will\nstart trying to rotate the token if the token is older than 80 percent of\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\nand must be at least 10 minutes.\n+optional" + }, + "path": { + "type": "string", + "description": "Path is the path relative to the mount point of the file to project the\ntoken into." + } + }, + "description": "ServiceAccountTokenProjection represents a projected service account token\nvolume. This projection can be used to insert a service account token into\nthe pods runtime filesystem for use against APIs (Kubernetes API Server or\notherwise)." + }, + "v1StorageOSVolumeSource": { + "type": "object", + "properties": { + "volumeName": { + "type": "string", + "description": "VolumeName is the human-readable name of the StorageOS volume. Volume\nnames are only unique within a namespace." + }, + "volumeNamespace": { + "type": "string", + "title": "VolumeNamespace specifies the scope of the volume within StorageOS. If no\nnamespace is specified then the Pod's namespace will be used. This allows the\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\nSet VolumeName to any name to override the default behaviour.\nSet to \"default\" if you are not using namespaces within StorageOS.\nNamespaces that do not pre-exist within StorageOS will be created.\n+optional" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.\n+optional" + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\n+optional" + }, + "secretRef": { + "$ref": "#/definitions/v1LocalObjectReference", + "title": "SecretRef specifies the secret to use for obtaining the StorageOS API\ncredentials. If not specified, default values will be attempted.\n+optional" + } + }, + "description": "Represents a StorageOS persistent volume resource." + }, + "v1Sysctl": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of a property to set" + }, + "value": { + "type": "string", + "title": "Value of a property to set" + } + }, + "title": "Sysctl defines a kernel parameter to be set" + }, + "v1TCPSocketAction": { + "type": "object", + "properties": { + "port": { + "$ref": "#/definitions/intstrIntOrString", + "description": "Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME." 
+ }, + "host": { + "type": "string", + "title": "Optional: Host name to connect to, defaults to the pod IP.\n+optional" + } + }, + "title": "TCPSocketAction describes an action based on opening a socket" + }, + "v1Time": { + "type": "object", + "properties": { + "seconds": { + "type": "string", + "format": "int64", + "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive." + }, + "nanos": { + "type": "integer", + "format": "int32", + "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context." + } + }, + "description": "Time is a wrapper around time.Time which supports correct\nmarshaling to YAML and JSON. Wrappers are provided for many\nof the factory methods that the time package offers.\n\n+protobuf.options.marshal=false\n+protobuf.as=Timestamp\n+protobuf.options.(gogoproto.goproto_stringer)=false" + }, + "v1Toleration": { + "type": "object", + "properties": { + "key": { + "type": "string", + "title": "Key is the taint key that the toleration applies to. Empty means match all taint keys.\nIf the key is empty, operator must be Exists; this combination means to match all values and all keys.\n+optional" + }, + "operator": { + "type": "string", + "title": "Operator represents a key's relationship to the value.\nValid operators are Exists and Equal. Defaults to Equal.\nExists is equivalent to wildcard for value, so that a pod can\ntolerate all taints of a particular category.\n+optional" + }, + "value": { + "type": "string", + "title": "Value is the taint value the toleration matches to.\nIf the operator is Exists, the value should be empty, otherwise just a regular string.\n+optional" + }, + "effect": { + "type": "string", + "title": "Effect indicates the taint effect to match. Empty means match all taint effects.\nWhen specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.\n+optional" + }, + "tolerationSeconds": { + "type": "string", + "format": "int64", + "title": "TolerationSeconds represents the period of time the toleration (which must be\nof effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,\nit is not set, which means tolerate the taint forever (do not evict). Zero and\nnegative values will be treated as 0 (evict immediately) by the system.\n+optional" + } + }, + "description": "The pod this Toleration is attached to tolerates any taint that matches\nthe triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e." + }, + "v1Volume": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Volume's name.\nMust be a DNS_LABEL and unique within the pod.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + }, + "volumeSource": { + "$ref": "#/definitions/v1VolumeSource", + "description": "VolumeSource represents the location and type of the mounted volume.\nIf not specified, the Volume is implied to be an EmptyDir.\nThis implied behavior is deprecated and will be removed in a future version." + } + }, + "description": "Volume represents a named volume in a pod that may be accessed by any container in the pod." 
+ }, + "v1VolumeDevice": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "name must match the name of a persistentVolumeClaim in the pod" + }, + "devicePath": { + "type": "string", + "description": "devicePath is the path inside of the container that the device will be mapped to." + } + }, + "description": "volumeDevice describes a mapping of a raw block device within a container." + }, + "v1VolumeMount": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "This must match the Name of a Volume." + }, + "readOnly": { + "type": "boolean", + "format": "boolean", + "title": "Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false.\n+optional" + }, + "mountPath": { + "type": "string", + "description": "Path within the container at which the volume should be mounted. Must\nnot contain ':'." + }, + "subPath": { + "type": "string", + "title": "Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root).\n+optional" + }, + "mountPropagation": { + "type": "string", + "title": "mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10.\n+optional" + }, + "subPathExpr": { + "type": "string", + "title": "Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive.\nThis field is beta in 1.15.\n+optional" + } + }, + "description": "VolumeMount describes a mounting of a Volume within a container." + }, + "v1VolumeProjection": { + "type": "object", + "properties": { + "secret": { + "$ref": "#/definitions/v1SecretProjection", + "title": "information about the secret data to project\n+optional" + }, + "downwardAPI": { + "$ref": "#/definitions/v1DownwardAPIProjection", + "title": "information about the downwardAPI data to project\n+optional" + }, + "configMap": { + "$ref": "#/definitions/v1ConfigMapProjection", + "title": "information about the configMap data to project\n+optional" + }, + "serviceAccountToken": { + "$ref": "#/definitions/v1ServiceAccountTokenProjection", + "title": "information about the serviceAccountToken data to project\n+optional" + } + }, + "title": "Projection that may be projected along with other supported volume types" + }, + "v1VolumeSource": { + "type": "object", + "properties": { + "hostPath": { + "$ref": "#/definitions/v1HostPathVolumeSource", + "title": "HostPath represents a pre-existing file or directory on the host\nmachine that is directly exposed to the container. This is generally\nused for system agents or other privileged things that are allowed\nto see the host machine. 
Most containers will NOT need this.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n---\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\nmount host directories as read/write.\n+optional" + }, + "emptyDir": { + "$ref": "#/definitions/v1EmptyDirVolumeSource", + "title": "EmptyDir represents a temporary directory that shares a pod's lifetime.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir\n+optional" + }, + "gcePersistentDisk": { + "$ref": "#/definitions/v1GCEPersistentDiskVolumeSource", + "title": "GCEPersistentDisk represents a GCE Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\n+optional" + }, + "awsElasticBlockStore": { + "$ref": "#/definitions/v1AWSElasticBlockStoreVolumeSource", + "title": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\n+optional" + }, + "gitRepo": { + "$ref": "#/definitions/v1GitRepoVolumeSource", + "title": "GitRepo represents a git repository at a particular revision.\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container.\n+optional" + }, + "secret": { + "$ref": "#/definitions/v1SecretVolumeSource", + "title": "Secret represents a secret that should populate this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional" + }, + "nfs": { + "$ref": "#/definitions/v1NFSVolumeSource", + "title": "NFS represents an NFS mount on the host that shares a pod's lifetime\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs\n+optional" + }, + "iscsi": { + "$ref": "#/definitions/v1ISCSIVolumeSource", + "title": "ISCSI represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://examples.k8s.io/volumes/iscsi/README.md\n+optional" + }, + "glusterfs": { + "$ref": "#/definitions/v1GlusterfsVolumeSource", + "title": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md\n+optional" + }, + "persistentVolumeClaim": { + "$ref": "#/definitions/v1PersistentVolumeClaimVolumeSource", + "title": "PersistentVolumeClaimVolumeSource represents a reference to a\nPersistentVolumeClaim in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional" + }, + "rbd": { + "$ref": "#/definitions/v1RBDVolumeSource", + "title": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/rbd/README.md\n+optional" + }, + "flexVolume": { + "$ref": "#/definitions/v1FlexVolumeSource", + "title": "FlexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin.\n+optional" + }, + "cinder": { + "$ref": "#/definitions/v1CinderVolumeSource", + "title": "Cinder represents a cinder volume attached and mounted on kubelets host machine.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md\n+optional" + }, + "cephfs": { + "$ref": "#/definitions/v1CephFSVolumeSource", + "title": "CephFS represents a Ceph FS mount 
on the host that shares a pod's lifetime\n+optional" + }, + "flocker": { + "$ref": "#/definitions/v1FlockerVolumeSource", + "title": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running\n+optional" + }, + "downwardAPI": { + "$ref": "#/definitions/v1DownwardAPIVolumeSource", + "title": "DownwardAPI represents downward API about the pod that should populate this volume\n+optional" + }, + "fc": { + "$ref": "#/definitions/v1FCVolumeSource", + "title": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.\n+optional" + }, + "azureFile": { + "$ref": "#/definitions/v1AzureFileVolumeSource", + "title": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.\n+optional" + }, + "configMap": { + "$ref": "#/definitions/v1ConfigMapVolumeSource", + "title": "ConfigMap represents a configMap that should populate this volume\n+optional" + }, + "vsphereVolume": { + "$ref": "#/definitions/v1VsphereVirtualDiskVolumeSource", + "title": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine\n+optional" + }, + "quobyte": { + "$ref": "#/definitions/v1QuobyteVolumeSource", + "title": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime\n+optional" + }, + "azureDisk": { + "$ref": "#/definitions/v1AzureDiskVolumeSource", + "title": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.\n+optional" + }, + "photonPersistentDisk": { + "$ref": "#/definitions/v1PhotonPersistentDiskVolumeSource", + "title": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine" + }, + "projected": { + "$ref": "#/definitions/v1ProjectedVolumeSource", + "title": "Items for all in one resources secrets, configmaps, and downward API" + }, + "portworxVolume": { + "$ref": "#/definitions/v1PortworxVolumeSource", + "title": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine\n+optional" + }, + "scaleIO": { + "$ref": "#/definitions/v1ScaleIOVolumeSource", + "title": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.\n+optional" + }, + "storageos": { + "$ref": "#/definitions/v1StorageOSVolumeSource", + "title": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.\n+optional" + }, + "csi": { + "$ref": "#/definitions/v1CSIVolumeSource", + "title": "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).\n+optional" + } + }, + "description": "Represents the source of a volume to mount.\nOnly one of its members may be specified." + }, + "v1VsphereVirtualDiskVolumeSource": { + "type": "object", + "properties": { + "volumePath": { + "type": "string", + "title": "Path that identifies vSphere volume vmdk" + }, + "fsType": { + "type": "string", + "title": "Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.\n+optional" + }, + "storagePolicyName": { + "type": "string", + "title": "Storage Policy Based Management (SPBM) profile name.\n+optional" + }, + "storagePolicyID": { + "type": "string", + "title": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.\n+optional" + } + }, + "description": "Represents a vSphere volume resource." + }, + "v1WeightedPodAffinityTerm": { + "type": "object", + "properties": { + "weight": { + "type": "integer", + "format": "int32", + "description": "weight associated with matching the corresponding podAffinityTerm,\nin the range 1-100." + }, + "podAffinityTerm": { + "$ref": "#/definitions/v1PodAffinityTerm", + "description": "Required. A pod affinity term, associated with the corresponding weight." + } + }, + "title": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)" + }, + "v1WindowsSecurityContextOptions": { + "type": "object", + "properties": { + "gmsaCredentialSpecName": { + "type": "string", + "title": "GMSACredentialSpecName is the name of the GMSA credential spec to use.\nThis field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.\n+optional" + }, + "gmsaCredentialSpec": { + "type": "string", + "title": "GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field.\nThis field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.\n+optional" + }, + "runAsUserName": { + "type": "string", + "title": "The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nThis field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.\n+optional" + } + }, + "description": "WindowsSecurityContextOptions contain Windows-specific options and credentials." + }, + "v1alpha1ArchiveStrategy": { + "type": "object", + "properties": { + "tar": { + "$ref": "#/definitions/v1alpha1TarStrategy" + }, + "none": { + "$ref": "#/definitions/v1alpha1NoneStrategy" + } + }, + "title": "ArchiveStrategy describes how to archive files/directory when saving artifacts" + }, + "v1alpha1Arguments": { + "type": "object", + "properties": { + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Parameter" + }, + "title": "Parameters is the list of parameters to pass to the template or workflow\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "artifacts": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Artifact" + }, + "title": "Artifacts is the list of artifacts to pass to the template or workflow\n+patchStrategy=merge\n+patchMergeKey=name" + } + }, + "title": "Arguments to a template" + }, + "v1alpha1Artifact": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "name of the artifact. must be unique within a template's inputs/outputs." 
+ }, + "path": { + "type": "string", + "title": "Path is the container path to the artifact" + }, + "mode": { + "type": "integer", + "format": "int32", + "description": "mode bits to use on this file, must be a value between 0 and 0777\nset when loading input artifacts." + }, + "from": { + "type": "string", + "title": "From allows an artifact to reference an artifact from a previous step" + }, + "artifactLocation": { + "$ref": "#/definitions/v1alpha1ArtifactLocation", + "title": "ArtifactLocation contains the location of the artifact" + }, + "globalName": { + "type": "string", + "title": "GlobalName exports an output artifact to the global scope, making it available as\n'{{workflow.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts" + }, + "archive": { + "$ref": "#/definitions/v1alpha1ArchiveStrategy", + "description": "Archive controls how the artifact will be saved to the artifact repository." + }, + "optional": { + "type": "boolean", + "format": "boolean", + "title": "Make Artifacts optional, if Artifacts doesn't generate or exist" + } + }, + "title": "Artifact indicates an artifact to place at a specified path" + }, + "v1alpha1ArtifactLocation": { + "type": "object", + "properties": { + "archiveLogs": { + "type": "boolean", + "format": "boolean", + "title": "ArchiveLogs indicates if the container logs should be archived" + }, + "s3": { + "$ref": "#/definitions/v1alpha1S3Artifact", + "title": "S3 contains S3 artifact location details" + }, + "git": { + "$ref": "#/definitions/v1alpha1GitArtifact", + "title": "Git contains git artifact location details" + }, + "http": { + "$ref": "#/definitions/v1alpha1HTTPArtifact", + "title": "HTTP contains HTTP artifact location details" + }, + "artifactory": { + "$ref": "#/definitions/v1alpha1ArtifactoryArtifact", + "title": "Artifactory contains artifactory artifact location details" + }, + "hdfs": { + "$ref": "#/definitions/v1alpha1HDFSArtifact", + "title": "HDFS contains HDFS artifact location details" + }, + "raw": { + "$ref": "#/definitions/v1alpha1RawArtifact", + "title": "Raw contains raw artifact location details" + } + }, + "description": "ArtifactLocation describes a location for a single or multiple artifacts.\nIt is used as single artifact in the context of inputs/outputs (e.g. outputs.artifacts.artname).\nIt is also used to describe the location of multiple artifacts such as the archive location\nof a single workflow step, which the executor will use as a default location to store its files." 
+ }, + "v1alpha1ArtifactoryArtifact": { + "type": "object", + "properties": { + "url": { + "type": "string", + "title": "URL of the artifact" + }, + "artifactoryAuth": { + "$ref": "#/definitions/v1alpha1ArtifactoryAuth" + } + }, + "title": "ArtifactoryArtifact is the location of an artifactory artifact" + }, + "v1alpha1ArtifactoryAuth": { + "type": "object", + "properties": { + "usernameSecret": { + "$ref": "#/definitions/v1SecretKeySelector", + "title": "UsernameSecret is the secret selector to the repository username" + }, + "passwordSecret": { + "$ref": "#/definitions/v1SecretKeySelector", + "title": "PasswordSecret is the secret selector to the repository password" + } + }, + "title": "ArtifactoryAuth describes the secret selectors required for authenticating to artifactory" + }, + "v1alpha1ContinueOn": { + "type": "object", + "properties": { + "error": { + "type": "boolean", + "format": "boolean", + "title": "+optional" + }, + "failed": { + "type": "boolean", + "format": "boolean", + "title": "+optional" + } + }, + "description": "ContinueOn defines if a workflow should continue even if a task or step fails/errors.\nIt can be specified if the workflow should continue when the pod errors, fails or both." + }, + "v1alpha1DAGTask": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name is the name of the target" + }, + "template": { + "type": "string", + "title": "Name of template to execute" + }, + "arguments": { + "$ref": "#/definitions/v1alpha1Arguments", + "title": "Arguments are the parameter and artifact arguments to the template" + }, + "templateRef": { + "$ref": "#/definitions/v1alpha1TemplateRef", + "description": "TemplateRef is the reference to the template resource to execute." + }, + "dependencies": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Dependencies are name of other targets which this depends on" + }, + "withItems": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Item" + }, + "title": "WithItems expands a task into multiple parallel tasks from the items in the list" + }, + "withParam": { + "type": "string", + "description": "WithParam expands a task into multiple parallel tasks from the value in the parameter,\nwhich is expected to be a JSON list." + }, + "withSequence": { + "$ref": "#/definitions/v1alpha1Sequence", + "title": "WithSequence expands a task into a numeric sequence" + }, + "when": { + "type": "string", + "title": "When is an expression in which the task should conditionally execute" + }, + "continueOn": { + "$ref": "#/definitions/v1alpha1ContinueOn", + "title": "ContinueOn makes argo to proceed with the following step even if this step fails.\nErrors and Failed states can be specified" + } + }, + "title": "DAGTask represents a node in the graph during DAG execution" + }, + "v1alpha1DAGTemplate": { + "type": "object", + "properties": { + "target": { + "type": "string", + "title": "Target are one or more names of targets to execute in a DAG" + }, + "tasks": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1DAGTask" + }, + "title": "Tasks are a list of DAG tasks\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "failFast": { + "type": "boolean", + "format": "boolean", + "title": "This flag is for DAG logic. The DAG logic has a built-in \"fail fast\" feature to stop scheduling new steps,\nas soon as it detects that one of the DAG nodes is failed. 
Then it waits until all DAG nodes are completed\nbefore failing the DAG itself.\nThe FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to\ncompletion (either success or failure), regardless of the failed outcomes of branches in the DAG.\nMore info and example about this feature at https://github.com/argoproj/argo/issues/1442" + } + }, + "title": "DAGTemplate is a template subtype for directed acyclic graph templates" + }, + "v1alpha1ExecutorConfig": { + "type": "object", + "properties": { + "serviceAccountName": { + "type": "string", + "description": "ServiceAccountName specifies the service account name of the executor container." + } + }, + "description": "ExecutorConfig holds configurations of an executor container." + }, + "v1alpha1GitArtifact": { + "type": "object", + "properties": { + "repo": { + "type": "string", + "title": "Repo is the git repository" + }, + "revision": { + "type": "string", + "title": "Revision is the git commit, tag, branch to checkout" + }, + "depth": { + "type": "string", + "format": "uint64", + "title": "Depth specifies clones/fetches should be shallow and include the given\nnumber of commits from the branch tip" + }, + "fetch": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Fetch specifies a number of refs that should be fetched before checkout" + }, + "usernameSecret": { + "$ref": "#/definitions/v1SecretKeySelector", + "title": "UsernameSecret is the secret selector to the repository username" + }, + "passwordSecret": { + "$ref": "#/definitions/v1SecretKeySelector", + "title": "PasswordSecret is the secret selector to the repository password" + }, + "sshPrivateKeySecret": { + "$ref": "#/definitions/v1SecretKeySelector", + "title": "SSHPrivateKeySecret is the secret selector to the repository ssh private key" + }, + "insecureIgnoreHostKey": { + "type": "boolean", + "format": "boolean", + "title": "InsecureIgnoreHostKey disables SSH strict host key checking during git clone" + } + }, + "title": "GitArtifact is the location of an git artifact" + }, + "v1alpha1HDFSArtifact": { + "type": "object", + "properties": { + "hDFSConfig": { + "$ref": "#/definitions/v1alpha1HDFSConfig" + }, + "path": { + "type": "string", + "title": "Path is a file path in HDFS" + }, + "force": { + "type": "boolean", + "format": "boolean", + "title": "Force copies a file forcibly even if it exists (default: false)" + } + }, + "title": "HDFSArtifact is the location of an HDFS artifact" + }, + "v1alpha1HDFSConfig": { + "type": "object", + "properties": { + "hDFSKrbConfig": { + "$ref": "#/definitions/v1alpha1HDFSKrbConfig" + }, + "addresses": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Addresses is accessible addresses of HDFS name nodes" + }, + "hdfsUser": { + "type": "string", + "description": "HDFSUser is the user to access HDFS file system.\nIt is ignored if either ccache or keytab is used." + } + }, + "title": "HDFSConfig is configurations for HDFS" + }, + "v1alpha1HDFSKrbConfig": { + "type": "object", + "properties": { + "krbCCacheSecret": { + "$ref": "#/definitions/v1SecretKeySelector", + "description": "KrbCCacheSecret is the secret selector for Kerberos ccache\nEither ccache or keytab can be set to use Kerberos." + }, + "krbKeytabSecret": { + "$ref": "#/definitions/v1SecretKeySelector", + "description": "KrbKeytabSecret is the secret selector for Kerberos keytab\nEither ccache or keytab can be set to use Kerberos." 
+ }, + "krbUsername": { + "type": "string", + "description": "KrbUsername is the Kerberos username used with Kerberos keytab\nIt must be set if keytab is used." + }, + "krbRealm": { + "type": "string", + "description": "KrbRealm is the Kerberos realm used with Kerberos keytab\nIt must be set if keytab is used." + }, + "krbConfigConfigMap": { + "$ref": "#/definitions/v1ConfigMapKeySelector", + "description": "KrbConfig is the configmap selector for Kerberos config as string\nIt must be set if either ccache or keytab is used." + }, + "krbServicePrincipalName": { + "type": "string", + "description": "KrbServicePrincipalName is the principal name of Kerberos service\nIt must be set if either ccache or keytab is used." + } + }, + "title": "HDFSKrbConfig is auth configurations for Kerberos" + }, + "v1alpha1HTTPArtifact": { + "type": "object", + "properties": { + "url": { + "type": "string", + "title": "URL of the artifact" + } + }, + "title": "HTTPArtifact allows an file served on HTTP to be placed as an input artifact in a container" + }, + "v1alpha1Inputs": { + "type": "object", + "properties": { + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Parameter" + }, + "title": "Parameters are a list of parameters passed as inputs\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "artifacts": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Artifact" + }, + "title": "Artifact are a list of artifacts passed as inputs\n+patchStrategy=merge\n+patchMergeKey=name" + } + }, + "title": "Inputs are the mechanism for passing parameters, artifacts, volumes from one template to another" + }, + "v1alpha1Item": { + "type": "object", + "properties": { + "type": { + "type": "string", + "format": "int64" + }, + "numVal": { + "type": "string" + }, + "boolVal": { + "type": "boolean", + "format": "boolean" + }, + "strVal": { + "type": "string" + }, + "mapVal": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/v1alpha1ItemValue" + } + }, + "listVal": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ItemValue" + } + } + }, + "description": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true", + "title": "Item expands a single workflow step into multiple parallel steps\nThe value of Item can be a map, string, bool, or number" + }, + "v1alpha1ItemValue": { + "type": "object", + "properties": { + "type": { + "type": "string", + "format": "int64" + }, + "numVal": { + "type": "string" + }, + "boolVal": { + "type": "boolean", + "format": "boolean" + }, + "strVal": { + "type": "string" + }, + "mapVal": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "listVal": { + "type": "array", + "items": { + "type": "string", + "format": "byte" + } + } + }, + "title": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true" + }, + "v1alpha1Metadata": { + "type": "object", + "properties": { + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "title": "Pod metdata" + }, + "v1alpha1NoneStrategy": { + "type": "object", + "description": "NoneStrategy indicates to skip tar process and upload the files or directory tree as independent\nfiles. Note that if the artifact is a directory, the artifact driver must support the ability to\nsave/load the directory appropriately." 
+ }, + "v1alpha1Outputs": { + "type": "object", + "properties": { + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Parameter" + }, + "title": "Parameters holds the list of output parameters produced by a step\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "artifacts": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Artifact" + }, + "title": "Artifacts holds the list of output artifacts produced by a step\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "result": { + "type": "string", + "title": "Result holds the result (stdout) of a script template" + } + }, + "title": "Outputs hold parameters, artifacts, and results from a step" + }, + "v1alpha1ParallelSteps": { + "type": "object", + "properties": { + "steps": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1WorkflowStep" + } + } + } + }, + "v1alpha1Parameter": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name is the parameter name" + }, + "default": { + "type": "string", + "title": "Default is the default value to use for an input parameter if a value was not supplied" + }, + "value": { + "type": "string", + "title": "Value is the literal value to use for the parameter.\nIf specified in the context of an input parameter, the value takes precedence over any passed values" + }, + "valueFrom": { + "$ref": "#/definitions/v1alpha1ValueFrom", + "title": "ValueFrom is the source for the output parameter's value" + }, + "globalName": { + "type": "string", + "title": "GlobalName exports an output parameter to the global scope, making it available as\n'{{workflow.outputs.parameters.XXXX}} and in workflow.status.outputs.parameters" + } + }, + "title": "Parameter indicate a passed string parameter to a service template with an optional default value" + }, + "v1alpha1RawArtifact": { + "type": "object", + "properties": { + "data": { + "type": "string", + "title": "Data is the string contents of the artifact" + } + }, + "title": "RawArtifact allows raw string content to be placed as an artifact in a container" + }, + "v1alpha1ResourceTemplate": { + "type": "object", + "properties": { + "action": { + "type": "string", + "title": "Action is the action to perform to the resource.\nMust be one of: get, create, apply, delete, replace, patch" + }, + "mergeStrategy": { + "type": "string", + "title": "MergeStrategy is the strategy used to merge a patch. It defaults to \"strategic\"\nMust be one of: strategic, merge, json" + }, + "manifest": { + "type": "string", + "title": "Manifest contains the kubernetes manifest" + }, + "setOwnerReference": { + "type": "boolean", + "format": "boolean", + "description": "SetOwnerReference sets the reference to the workflow on the OwnerReference of generated resource." 
+ }, + "successCondition": { + "type": "string", + "title": "SuccessCondition is a label selector expression which describes the conditions\nof the k8s resource in which it is acceptable to proceed to the following step" + }, + "failureCondition": { + "type": "string", + "title": "FailureCondition is a label selector expression which describes the conditions\nof the k8s resource in which the step was considered failed" + } + }, + "title": "ResourceTemplate is a template subtype to manipulate kubernetes resources" + }, + "v1alpha1RetryStrategy": { + "type": "object", + "properties": { + "limit": { + "type": "integer", + "format": "int32", + "title": "Limit is the maximum number of attempts when retrying a container" + } + }, + "title": "RetryStrategy provides controls on how to retry a workflow step" + }, + "v1alpha1S3Artifact": { + "type": "object", + "properties": { + "s3Bucket": { + "$ref": "#/definitions/v1alpha1S3Bucket" + }, + "key": { + "type": "string", + "title": "Key is the key in the bucket where the artifact resides" + } + }, + "title": "S3Artifact is the location of an S3 artifact" + }, + "v1alpha1S3Bucket": { + "type": "object", + "properties": { + "endpoint": { + "type": "string", + "title": "Endpoint is the hostname of the bucket endpoint" + }, + "bucket": { + "type": "string", + "title": "Bucket is the name of the bucket" + }, + "region": { + "type": "string", + "title": "Region contains the optional bucket region" + }, + "insecure": { + "type": "boolean", + "format": "boolean", + "title": "Insecure will connect to the service with TLS" + }, + "accessKeySecret": { + "$ref": "#/definitions/v1SecretKeySelector", + "title": "AccessKeySecret is the secret selector to the bucket's access key" + }, + "secretKeySecret": { + "$ref": "#/definitions/v1SecretKeySelector", + "title": "SecretKeySecret is the secret selector to the bucket's secret key" + }, + "roleARN": { + "type": "string", + "description": "RoleARN is the Amazon Resource Name (ARN) of the role to assume." + } + }, + "title": "S3Bucket contains the access information required for interfacing with an S3 bucket" + }, + "v1alpha1ScriptTemplate": { + "type": "object", + "properties": { + "container": { + "$ref": "#/definitions/v1Container" + }, + "source": { + "type": "string", + "title": "Source contains the source code of the script to execute" + } + }, + "title": "ScriptTemplate is a template subtype to enable scripting through code steps" + }, + "v1alpha1Sequence": { + "type": "object", + "properties": { + "count": { + "type": "string", + "title": "Count is number of elements in the sequence (default: 0). Not to be used with end" + }, + "start": { + "type": "string", + "title": "Number at which to start the sequence (default: 0)" + }, + "end": { + "type": "string", + "title": "Number at which to end the sequence (default: 0). 
Not to be used with Count" + }, + "format": { + "type": "string", + "title": "Format is a printf format string to format the value in the sequence" + } + }, + "title": "Sequence expands a workflow step into a numeric range" + }, + "v1alpha1SuspendTemplate": { + "type": "object", + "title": "SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time" + }, + "v1alpha1TarStrategy": { + "type": "object", + "title": "TarStrategy will tar and gzip the file or directory when saving" + }, + "v1alpha1Template": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name is the name of the template" + }, + "template": { + "type": "string", + "description": "Template is the name of the template which is used as the base of this template." + }, + "arguments": { + "$ref": "#/definitions/v1alpha1Arguments", + "description": "Arguments hold arguments to the template." + }, + "templateRef": { + "$ref": "#/definitions/v1alpha1TemplateRef", + "description": "TemplateRef is the reference to the template resource which is used as the base of this template." + }, + "inputs": { + "$ref": "#/definitions/v1alpha1Inputs", + "title": "Inputs describe what input parameters and artifacts are supplied to this template" + }, + "outputs": { + "$ref": "#/definitions/v1alpha1Outputs", + "title": "Outputs describe the parameters and artifacts that this template produces" + }, + "nodeSelector": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "NodeSelector is a selector to schedule this step of the workflow to be\nrun on the selected node(s). Overrides the selector set at the workflow level." + }, + "affinity": { + "$ref": "#/definitions/v1Affinity", + "title": "Affinity sets the pod's scheduling constraints\nOverrides the affinity set at the workflow level (if any)" + }, + "metadata": { + "$ref": "#/definitions/v1alpha1Metadata", + "title": "Metadata sets the pod's metadata, i.e.
annotations and labels" + }, + "daemon": { + "type": "boolean", + "format": "boolean", + "title": "Daemon will allow a workflow to proceed to the next step so long as the container reaches readiness" + }, + "steps": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ParallelSteps" + }, + "title": "Steps define a series of sequential/parallel workflow steps" + }, + "container": { + "$ref": "#/definitions/v1Container", + "title": "Container is the main container image to run in the pod" + }, + "script": { + "$ref": "#/definitions/v1alpha1ScriptTemplate", + "title": "Script runs a portion of code against an interpreter" + }, + "resource": { + "$ref": "#/definitions/v1alpha1ResourceTemplate", + "title": "Resource template subtype which can run k8s resources" + }, + "dag": { + "$ref": "#/definitions/v1alpha1DAGTemplate", + "title": "DAG template subtype which runs a DAG" + }, + "suspend": { + "$ref": "#/definitions/v1alpha1SuspendTemplate", + "title": "Suspend template subtype which can suspend a workflow when reaching the step" + }, + "volumes": { + "type": "array", + "items": { + "$ref": "#/definitions/v1Volume" + }, + "title": "Volumes is a list of volumes that can be mounted by containers in a template.\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "initContainers": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1UserContainer" + }, + "title": "InitContainers is a list of containers which run before the main container.\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "sidecars": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1UserContainer" + }, + "title": "Sidecars is a list of containers which run alongside the main container\nSidecars are automatically killed when the main container completes\n+patchStrategy=merge\n+patchMergeKey=name" + }, + "archiveLocation": { + "$ref": "#/definitions/v1alpha1ArtifactLocation", + "description": "Location in which all files related to the step will be stored (logs, artifacts, etc...).\nCan be overridden by individual items in Outputs. If omitted, will use the default\nartifact repository location configured in the controller, appended with the\n\u003cworkflowname\u003e/\u003cnodename\u003e in the key." + }, + "activeDeadlineSeconds": { + "type": "string", + "format": "int64", + "description": "Optional duration in seconds relative to the StartTime that the pod may be active on a node\nbefore the system actively tries to terminate the pod; value must be a positive integer.\nThis field is only applicable to container and script templates." + }, + "retryStrategy": { + "$ref": "#/definitions/v1alpha1RetryStrategy", + "title": "RetryStrategy describes how to retry a template when it fails" + }, + "parallelism": { + "type": "string", + "format": "int64", + "description": "Parallelism limits the max total parallel pods that can execute at the same time within the\nboundaries of this template invocation. If additional steps/dag templates are invoked, the\npods created by those templates will not be counted towards this total."
+ }, + "tolerations": { + "type": "array", + "items": { + "$ref": "#/definitions/v1Toleration" + }, + "title": "Tolerations to apply to workflow pods.\n+patchStrategy=merge\n+patchMergeKey=key" + }, + "schedulerName": { + "type": "string", + "title": "If specified, the pod will be dispatched by specified scheduler.\nOr it will be dispatched by workflow scope scheduler if specified.\nIf neither specified, the pod will be dispatched by default scheduler.\n+optional" + }, + "priorityClassName": { + "type": "string", + "description": "PriorityClassName to apply to workflow pods." + }, + "priority": { + "type": "integer", + "format": "int32", + "description": "Priority to apply to workflow pods." + }, + "serviceAccountName": { + "type": "string", + "title": "ServiceAccountName to apply to workflow pods" + }, + "automountServiceAccountToken": { + "type": "boolean", + "format": "boolean", + "description": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods.\nServiceAccountName of ExecutorConfig must be specified if this value is false." + }, + "executor": { + "$ref": "#/definitions/v1alpha1ExecutorConfig", + "description": "Executor holds configurations of the executor container." + }, + "hostAliases": { + "type": "array", + "items": { + "$ref": "#/definitions/v1HostAlias" + }, + "title": "HostAliases is an optional list of hosts and IPs that will be injected into the pod spec\n+patchStrategy=merge\n+patchMergeKey=ip" + }, + "securityContext": { + "$ref": "#/definitions/v1PodSecurityContext", + "title": "SecurityContext holds pod-level security attributes and common container settings.\nOptional: Defaults to empty. See type description for default values of each field.\n+optional" + }, + "podSpecPatch": { + "type": "string", + "description": "PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of\ncontainer fields which are not strings (e.g. resource limits)." + } + }, + "title": "Template is a reusable and composable unit of execution in a workflow" + }, + "v1alpha1TemplateRef": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Name is the resource name of the template." + }, + "template": { + "type": "string", + "description": "Template is the name of referred template in the resource." + }, + "runtimeResolution": { + "type": "boolean", + "format": "boolean", + "description": "RuntimeResolution skips validation at creation time.\nBy enabling this option, you can create the referred workflow template before the actual runtime." + } + }, + "description": "TemplateRef is a reference of template resource." + }, + "v1alpha1UserContainer": { + "type": "object", + "properties": { + "container": { + "$ref": "#/definitions/v1Container" + }, + "mirrorVolumeMounts": { + "type": "boolean", + "format": "boolean", + "title": "MirrorVolumeMounts will mount the same volumes specified in the main container\nto the container (including artifacts), at the same mountPaths. This enables\ndind daemon to partially see the same filesystem as the main container in\norder to use features such as docker volume binding" + } + }, + "description": "UserContainer is a container specified by a user." 
+ }, + "v1alpha1ValueFrom": { + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Path in the container to retrieve an output parameter value from in container templates" + }, + "jsonPath": { + "type": "string", + "title": "JSONPath of a resource to retrieve an output parameter value from in resource templates" + }, + "jqFilter": { + "type": "string", + "title": "JQFilter expression against the resource object in resource templates" + }, + "parameter": { + "type": "string", + "title": "Parameter reference to a step or dag task in which to retrieve an output parameter value from\n(e.g. '{{steps.mystep.outputs.myparam}}')" + } + }, + "title": "ValueFrom describes a location in which to obtain the value to a parameter" + }, + "v1alpha1WorkflowStep": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of the step" + }, + "template": { + "type": "string", + "title": "Template is the name of the template to execute as the step" + }, + "arguments": { + "$ref": "#/definitions/v1alpha1Arguments", + "title": "Arguments hold arguments to the template" + }, + "templateRef": { + "$ref": "#/definitions/v1alpha1TemplateRef", + "description": "TemplateRef is the reference to the template resource to execute as the step." + }, + "withItems": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Item" + }, + "title": "WithItems expands a step into multiple parallel steps from the items in the list" + }, + "withParam": { + "type": "string", + "description": "WithParam expands a step into multiple parallel steps from the value in the parameter,\nwhich is expected to be a JSON list." + }, + "withSequence": { + "$ref": "#/definitions/v1alpha1Sequence", + "title": "WithSequence expands a step into a numeric sequence" + }, + "when": { + "type": "string", + "title": "When is an expression in which the step should conditionally execute" + }, + "continueOn": { + "$ref": "#/definitions/v1alpha1ContinueOn", + "title": "ContinueOn makes argo to proceed with the following step even if this step fails.\nErrors and Failed states can be specified" + } + }, + "title": "WorkflowStep is a reference to a template to execute in a series of step" + }, + "v1alpha1WorkflowTemplate": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/definitions/v1ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/v1alpha1WorkflowTemplateSpec" + } + }, + "title": "WorkflowTemplate is the definition of a workflow template resource\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object" + }, + "v1alpha1WorkflowTemplateList": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/definitions/v1ListMeta" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1WorkflowTemplate" + } + } + }, + "title": "WorkflowTemplateList is list of WorkflowTemplate resources\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object" + }, + "v1alpha1WorkflowTemplateSpec": { + "type": "object", + "properties": { + "templates": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Template" + }, + "description": "Templates is a list of workflow templates." + }, + "arguments": { + "$ref": "#/definitions/v1alpha1Arguments", + "description": "Arguments hold arguments to the template." + } + }, + "description": "WorkflowTemplateSpec is a spec of WorkflowTemplate." 
+ }, + "workflowtemplateSubmitOptions": { + "type": "object", + "properties": { + "Strict": { + "type": "boolean", + "format": "boolean" + } + } + }, + "workflowtemplateWorkflowDeleteResponse": { + "type": "object", + "properties": { + "templateName": { + "type": "string" + }, + "status": { + "type": "string" + } + } + }, + "workflowtemplateWorkflowTemplateCreateRequest": { + "type": "object", + "properties": { + "namespace": { + "type": "string" + }, + "template": { + "$ref": "#/definitions/v1alpha1WorkflowTemplate" + }, + "createOptions": { + "$ref": "#/definitions/v1CreateOptions" + }, + "submitOptions": { + "$ref": "#/definitions/workflowtemplateSubmitOptions" + } + } + } + } +} diff --git a/cmd/server/workflowtemplate/workflow_template_server.go b/cmd/server/workflowtemplate/workflow_template_server.go new file mode 100644 index 000000000000..25815a8f5f3d --- /dev/null +++ b/cmd/server/workflowtemplate/workflow_template_server.go @@ -0,0 +1,182 @@ +package workflowtemplate + +import ( + context "context" + "encoding/json" + "errors" + "fmt" + common "github.com/argoproj/argo/cmd/server/common" + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/pkg/client/clientset/versioned" + wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" + "github.com/argoproj/argo/workflow/config" + "github.com/argoproj/argo/workflow/validate" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc/metadata" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +type WorkflowTemplateServer struct { + Namespace string + WfClientset *versioned.Clientset + KubeClientset *kubernetes.Clientset + EnableClientAuth bool + Config *config.WorkflowControllerConfig +} + +func NewWorkflowTemplateServer(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset, config *config.WorkflowControllerConfig, enableClientAuth bool) *WorkflowTemplateServer { + wfTmplServer := WorkflowTemplateServer{Namespace: namespace, WfClientset: wfClientset, KubeClientset: kubeClientSet, EnableClientAuth: enableClientAuth} + + return &wfTmplServer +} + +func (s *WorkflowTemplateServer) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubernetes.Clientset, error) { + md, _ := metadata.FromIncomingContext(ctx) + + if !s.EnableClientAuth { + return s.WfClientset, s.KubeClientset, nil + } + + var restConfigStr, bearerToken string + if len(md.Get(common.CLIENT_REST_CONFIG)) == 0 { + return nil, nil, errors.New("Client kubeconfig is not found") + } + restConfigStr = md.Get(common.CLIENT_REST_CONFIG)[0] + + if len(md.Get(common.AUTH_TOKEN)) > 0 { + bearerToken = md.Get(common.AUTH_TOKEN)[0] + } + + restConfig := rest.Config{} + + err := json.Unmarshal([]byte(restConfigStr), &restConfig) + if err != nil { + return nil, nil, err + } + + restConfig.BearerToken = bearerToken + + wfClientset, err := wfclientset.NewForConfig(&restConfig) + if err != nil { + log.Errorf("Failure to create WfClientset with ClientConfig '%+v': %s", restConfig, err) + return nil, nil, err + } + + clientset, err := kubernetes.NewForConfig(&restConfig) + if err != nil { + log.Errorf("Failure to create KubeClientset with ClientConfig '%+v': %s", restConfig, err) + return nil, nil, err + } + + return wfClientset, clientset, nil +} + +func (wts *WorkflowTemplateServer) Create(ctx context.Context, wftmplReq *WorkflowTemplateCreateRequest) (*v1alpha1.WorkflowTemplate, error) { + wfClient, _, err := wts.GetWFClient(ctx) + if err != nil { + 
return nil, err + } + namespace := wts.Namespace + if wftmplReq.Namespace != "" { + namespace = wftmplReq.Namespace + } + if wftmplReq.Template == nil { + return nil, fmt.Errorf("WorkflowTemplate is not found in Request body") + } + + err = validate.ValidateWorkflowTemplate(wfClient, namespace, wftmplReq.Template) + if err != nil { + return nil, fmt.Errorf("Failed to create workflow template: %v", err) + } + + created, err := wfClient.ArgoprojV1alpha1().WorkflowTemplates(namespace).Create(wftmplReq.Template) + + if err != nil { + return nil, err + } + + return created, err +} + +func (wts *WorkflowTemplateServer) Get(ctx context.Context, wftmplReq *WorkflowTemplateGetRequest) (*v1alpha1.WorkflowTemplate, error) { + wfClient, _, err := wts.GetWFClient(ctx) + if err != nil { + return nil, err + } + + namespace := wts.Namespace + if wftmplReq.Namespace != "" { + namespace = wftmplReq.Namespace + } + + wfTmpl, err := wfClient.ArgoprojV1alpha1().WorkflowTemplates(namespace).Get(wftmplReq.TemplateName, v1.GetOptions{}) + + if err != nil { + return nil, err + } + + return wfTmpl, err +} + +func (wts *WorkflowTemplateServer) List(ctx context.Context, wftmplReq *WorkflowTemplateListRequest) (*v1alpha1.WorkflowTemplateList, error) { + wfClient, _, err := wts.GetWFClient(ctx) + if err != nil { + return nil, err + } + + namespace := wts.Namespace + if wftmplReq.Namespace != "" { + namespace = wftmplReq.Namespace + } + + wfList, err := wfClient.ArgoprojV1alpha1().WorkflowTemplates(namespace).List(v1.ListOptions{}) + + if err != nil { + return nil, err + } + + return wfList, nil +} + +func (wts *WorkflowTemplateServer) Delete(ctx context.Context, wftmplReq *WorkflowTemplateDeleteRequest) (*WorkflowDeleteResponse, error) { + wfClient, _, err := wts.GetWFClient(ctx) + if err != nil { + return nil, err + } + + namespace := wts.Namespace + if wftmplReq.Namespace != "" { + namespace = wftmplReq.Namespace + } + + err = wfClient.ArgoprojV1alpha1().WorkflowTemplates(namespace).Delete(wftmplReq.TemplateName, &v1.DeleteOptions{}) + if err != nil { + return nil, err + } + + return &WorkflowDeleteResponse{ + TemplateName: wftmplReq.TemplateName, + Status: "Deleted", + }, nil +} + +func (wts *WorkflowTemplateServer) Lint(ctx context.Context, wftmplReq *WorkflowTemplateCreateRequest) (*v1alpha1.WorkflowTemplate, error) { + wfClient, _, err := wts.GetWFClient(ctx) + if err != nil { + return nil, err + } + + namespace := wts.Namespace + if wftmplReq.Namespace != "" { + namespace = wftmplReq.Namespace + } + + err = validate.ValidateWorkflowTemplate(wfClient, namespace, wftmplReq.Template) + if err != nil { + return nil, err + } + + return wftmplReq.Template, nil +} From 8ed639dc495e73672c716f6e6963fe2c0e357339 Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian Date: Thu, 12 Dec 2019 15:46:44 -0800 Subject: [PATCH 012/421] cleanup --- cmd/client/client.go | 50 -------- cmd/server/apiserver/argoserver.go | 51 +------- cmd/server/workflow/workflow.pb.go | 140 +++++++++++----------- cmd/server/workflow/workflow.pb.gw.go | 38 +++++- cmd/server/workflow/workflow.proto | 2 +- cmd/server/workflow/workflow.swagger.json | 58 +++++---- cmd/server/workflow/workflow_server.go | 19 ++- persist/sqldb/workflow_repository.go | 23 +++- 8 files changed, 175 insertions(+), 206 deletions(-) delete mode 100644 cmd/client/client.go diff --git a/cmd/client/client.go b/cmd/client/client.go deleted file mode 100644 index a26075b73349..000000000000 --- a/cmd/client/client.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - 
"context" - "encoding/json" - "fmt" - "github.com/argoproj/argo/cmd/server/workflow" - "github.com/argoproj/argo/util" - "github.com/prometheus/common/log" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -func main() { - //generate() - conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure()) - if err != nil { - fmt.Println(err) - } - defer conn.Close() - client := workflow.NewWorkflowServiceClient(conn) - //wf := unmarshalWF(wfStr) - config := util.InitKubeClient() - - clientConfig := workflow.ClientConfig{ - Host: config.Host, - APIPath: config.APIPath, - TLSClientConfig: config.TLSClientConfig, - Username: config.Username, - Password: config.Password, - AuthProvider: config.AuthProvider, - } - - marshalledClientConfig, err := json.Marshal(clientConfig) - if err != nil { - log.Fatal(err) - } - - md := metadata.Pairs(workflow.CLIENT_REST_CONFIG, string(marshalledClientConfig)) - ctx := metadata.NewOutgoingContext(context.Background(), md) - fmt.Println(string(marshalledClientConfig)) - - wq := workflow.WorkflowListRequest{Namespace: "default"} - - queried, err := client.List(ctx, &wq) - if err != nil { - log.Fatal(err) - } - fmt.Println(queried) -} diff --git a/cmd/server/apiserver/argoserver.go b/cmd/server/apiserver/argoserver.go index ff0df53fabcd..096ff033e0f5 100644 --- a/cmd/server/apiserver/argoserver.go +++ b/cmd/server/apiserver/argoserver.go @@ -105,17 +105,12 @@ func (as *ArgoServer) Run(ctx context.Context, port int) { httpL = tcpm.Match(cmux.HTTP1Fast()) grpcL = tcpm.Match(cmux.Any()) } else { - // We first match on HTTP 1.1 methods. - //httpL = tcpm.Match(cmux.HTTP1Fast()) // If not matched, we assume that its TLS. tlsl := tcpm.Match(cmux.Any()) tlsConfig := tls.Config{ //Certificates: []tls.Certificate{*as.settings.Certificate}, } - //if as.TLSConfigCustomizer != nil { - // as.TLSConfigCustomizer(&tlsConfig) - //} tlsl = tls.NewListener(tlsl, &tlsConfig) @@ -124,14 +119,6 @@ func (as *ArgoServer) Run(ctx context.Context, port int) { httpsL = tlsm.Match(cmux.HTTP1Fast()) grpcL = tlsm.Match(cmux.Any()) } - //lis, err := net.Listen("tcp", fmt.Sprintf(":%d", 8083)) - //if err != nil { - // log.Fatalf("failed to listen: %v", err) - //} - //lis1, err := net.Listen("tcp", fmt.Sprintf(":%d", 8082)) - //if err != nil { - // log.Fatalf("failed to listen: %v", err) - //} go func() { as.checkServeErr("grpcServer", grpcServer.Serve(grpcL)) }() go func() { as.checkServeErr("httpServer", httpServer.Serve(httpL)) }() @@ -140,7 +127,7 @@ func (as *ArgoServer) Run(ctx context.Context, port int) { go func() { as.checkServeErr("httpsServer", httpsServer.Serve(httpsL)) }() go func() { as.checkServeErr("tlsm", tlsm.Serve()) }() } - + log.Info("Argo API Server started successfully") as.stopCh = make(chan struct{}) <-as.stopCh } @@ -211,41 +198,6 @@ func mustRegisterGWHandler(register registerFunc, ctx context.Context, mux *runt } } -//type handlerSwitcher struct { -// handler http.Handler -// contentTypeToHandler map[string]http.Handler -//} -// -//func (s *handlerSwitcher) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// if contentHandler, ok := s.contentTypeToHandler[r.Header.Get("content-type")]; ok { -// contentHandler.ServeHTTP(w, r) -// } else { -// s.handler.ServeHTTP(w, r) -// } -//} - -// Workaround for https://github.com/golang/go/issues/21955 to support escaped URLs in URL path. 
-//type bug21955Workaround struct { -// handler http.Handler -//} -// -//var pathPatters = []*regexp.Regexp{ -// regexp.MustCompile(`/api/v1/workflows/[^/]+`), -//} -// -//func (bf *bug21955Workaround) ServeHTTP(w http.ResponseWriter, r *http.Request) { -// for _, pattern := range pathPatters { -// if pattern.MatchString(r.URL.RawPath) { -// r.URL.Path = r.URL.RawPath -// break -// } -// } -// bf.handler.ServeHTTP(w, r) -//} - -//func bug21955WorkaroundInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { -// return handler(ctx, req) -//} // newRedirectServer returns an HTTP server which does a 307 redirect to the HTTPS server func newRedirectServer(port int) *http.Server { @@ -283,6 +235,7 @@ func (a *ArgoServer) UpdateConfig(cm *apiv1.ConfigMap) (*config.WorkflowControll return nil, errors.InternalErrorf("ConfigMap '%s' does not have key '%s'", a.ConfigName, common.WorkflowControllerConfigMapKey) } var config config.WorkflowControllerConfig + log.Infof("Config Map: %s", configStr) err := yaml.Unmarshal([]byte(configStr), &config) if err != nil { return nil, errors.InternalWrapError(err) diff --git a/cmd/server/workflow/workflow.pb.go b/cmd/server/workflow/workflow.pb.go index efe424f5a432..b548fff74506 100644 --- a/cmd/server/workflow/workflow.pb.go +++ b/cmd/server/workflow/workflow.pb.go @@ -665,77 +665,77 @@ func init() { func init() { proto.RegisterFile("cmd/server/workflow/workflow.proto", fileDescriptor_192bc67c39cca05a) } var fileDescriptor_192bc67c39cca05a = []byte{ - // 1112 bytes of a gzipped FileDescriptorProto + // 1113 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x97, 0xcf, 0x4f, 0x24, 0x45, - 0x14, 0xc7, 0x53, 0x2c, 0x0c, 0xc3, 0x63, 0xc1, 0x58, 0x2a, 0x4e, 0x5a, 0x96, 0x65, 0x3b, 0xd1, - 0x10, 0xb2, 0xe9, 0x86, 0x81, 0xd5, 0x95, 0x0d, 0xae, 0x08, 0x06, 0x4d, 0xc8, 0xee, 0xa6, 0xc1, - 0x6c, 0x30, 0x5e, 0x9a, 0x9e, 0xb7, 0x4d, 0xcb, 0x74, 0x55, 0xdb, 0x55, 0x03, 0x41, 0xc2, 0x41, - 0x0f, 0x1b, 0x3d, 0x79, 0xf0, 0xe2, 0xdd, 0xc4, 0x6c, 0x36, 0xc6, 0x93, 0x7f, 0x84, 0x47, 0x8d, - 0xff, 0x80, 0x21, 0x26, 0xc6, 0xff, 0xc2, 0x54, 0xf5, 0x6f, 0x18, 0x36, 0x33, 0x02, 0xb7, 0xae, - 0xf7, 0xfa, 0xbd, 0xfa, 0xd4, 0xf7, 0xbd, 0x79, 0x35, 0x0d, 0xa6, 0x17, 0xb6, 0x6c, 0x81, 0xf1, - 0x3e, 0xc6, 0xf6, 0x01, 0x8f, 0xf7, 0x9e, 0xb4, 0xf9, 0x41, 0xfe, 0x60, 0x45, 0x31, 0x97, 0x9c, - 0xd6, 0xb3, 0xb5, 0xf1, 0xaa, 0xcf, 0x7d, 0xae, 0x8d, 0xb6, 0x7a, 0x4a, 0xfc, 0xc6, 0xa4, 0xcf, - 0xb9, 0xdf, 0x46, 0xdb, 0x8d, 0x02, 0xdb, 0x65, 0x8c, 0x4b, 0x57, 0x06, 0x9c, 0x89, 0xd4, 0xbb, - 0xb8, 0x77, 0x57, 0x58, 0x01, 0x57, 0xde, 0xd0, 0xf5, 0x76, 0x03, 0x86, 0xf1, 0xa1, 0x1d, 0xed, - 0xf9, 0xca, 0x20, 0xec, 0x10, 0xa5, 0x6b, 0xef, 0xcf, 0xdb, 0x3e, 0x32, 0x8c, 0x5d, 0x89, 0xad, - 0x34, 0x6a, 0xd5, 0x0f, 0xe4, 0x6e, 0x67, 0xc7, 0xf2, 0x78, 0x68, 0xbb, 0xb1, 0xde, 0xf4, 0x73, - 0xfd, 0x50, 0x84, 0xe6, 0xb8, 0xfb, 0xf3, 0x6e, 0x3b, 0xda, 0x75, 0xcf, 0x26, 0x31, 0x8b, 0xad, - 0x6d, 0x8f, 0xc7, 0xd8, 0x65, 0x23, 0xf3, 0xdf, 0x01, 0x18, 0xdb, 0xec, 0xec, 0x84, 0x81, 0x7c, - 0x18, 0x69, 0x6c, 0x4a, 0x61, 0x90, 0xb9, 0x21, 0x36, 0xc8, 0x34, 0x99, 0x19, 0x71, 0xf4, 0x33, - 0x35, 0xe1, 0x7a, 0x16, 0xf8, 0x40, 0xf9, 0x06, 0xb4, 0xaf, 0x62, 0xa3, 0x53, 0x00, 0x01, 0x13, - 0xd2, 0x65, 0x1e, 0x7e, 0xbc, 0xd6, 0xb8, 0xa6, 0xdf, 0x28, 0x59, 0x94, 0x1f, 0x99, 0x8c, 0x0f, - 0x23, 0x1e, 0x30, 0xd9, 0x18, 0x4c, 0xfc, 0x85, 0x45, 0xf9, 0x23, 0x37, 0x76, 0x43, 0x94, 0x18, - 0x8b, 0xc6, 
0xd0, 0xf4, 0x35, 0xe5, 0x2f, 0x2c, 0xf4, 0x2d, 0x18, 0x57, 0x85, 0x0a, 0x3c, 0x5c, - 0xf1, 0x3c, 0xde, 0x61, 0xb2, 0x51, 0xd3, 0x39, 0x4e, 0x59, 0x15, 0x6b, 0x52, 0xd0, 0xb5, 0xf8, - 0xd0, 0xe9, 0xb0, 0xc6, 0xf0, 0x34, 0x99, 0xa9, 0x3b, 0x15, 0x1b, 0x9d, 0x80, 0x5a, 0xdb, 0xdd, - 0xc1, 0xb6, 0x68, 0xd4, 0x75, 0x8e, 0x74, 0x45, 0x3f, 0x83, 0x71, 0x7e, 0xc0, 0x30, 0x76, 0xf0, - 0x09, 0xc6, 0xc8, 0x3c, 0x6c, 0x8c, 0x4c, 0x93, 0x99, 0xd1, 0xe6, 0xa2, 0x95, 0x48, 0x69, 0x95, - 0xab, 0x68, 0x45, 0x7b, 0xbe, 0x32, 0x08, 0x4b, 0x55, 0xd1, 0xda, 0x9f, 0xb7, 0x1e, 0x56, 0x62, - 0x9d, 0x53, 0xb9, 0xcc, 0xe7, 0x03, 0xf0, 0xda, 0xe3, 0xb4, 0x6a, 0xab, 0x31, 0xba, 0x12, 0x1d, - 0xfc, 0xa2, 0x83, 0x42, 0xd2, 0x49, 0x18, 0x51, 0x3a, 0x8b, 0xc8, 0xf5, 0x32, 0xe1, 0x0b, 0x03, - 0xdd, 0x86, 0xbc, 0x05, 0xb5, 0xf2, 0xa3, 0xcd, 0x65, 0xab, 0xe8, 0x0f, 0x2b, 0xeb, 0x0f, 0xfd, - 0x50, 0x40, 0xe5, 0x5d, 0x9c, 0xf5, 0x87, 0x95, 0xed, 0xed, 0xe4, 0xe9, 0xe8, 0x36, 0x8c, 0x79, - 0x9a, 0x24, 0xad, 0xbe, 0xae, 0xdb, 0x68, 0x73, 0xa1, 0xb7, 0xf3, 0xae, 0x96, 0x43, 0x9d, 0x6a, - 0x26, 0xba, 0x0c, 0x63, 0xa2, 0xdc, 0x58, 0xba, 0xe4, 0xa3, 0xcd, 0xd7, 0x0b, 0xb0, 0x4a, 0xdf, - 0x39, 0xd5, 0xb7, 0xcd, 0x67, 0x04, 0x68, 0x06, 0xbc, 0x8e, 0x32, 0x53, 0xca, 0x84, 0xeb, 0x59, - 0xfc, 0x83, 0xa2, 0x4b, 0x2b, 0xb6, 0xaa, 0x9a, 0x03, 0xa7, 0xd5, 0x7c, 0x04, 0xe0, 0xa3, 0xac, - 0x9e, 0x77, 0xae, 0xb7, 0xf3, 0xae, 0xe7, 0x71, 0x4e, 0x29, 0x87, 0xf9, 0x0d, 0x81, 0x57, 0x32, - 0xd4, 0x8d, 0x40, 0xc8, 0xde, 0xaa, 0xba, 0x09, 0xa3, 0xed, 0x40, 0xe4, 0x20, 0x49, 0x61, 0xe7, - 0x7b, 0x03, 0xd9, 0x28, 0x02, 0x9d, 0x72, 0x16, 0xb3, 0x53, 0x74, 0xd8, 0x27, 0x51, 0xab, 0xd4, - 0x61, 0x17, 0xd7, 0xcd, 0x80, 0x7a, 0x88, 0x21, 0x0f, 0xbe, 0xc4, 0x96, 0x56, 0xad, 0xee, 0xe4, - 0x6b, 0xf3, 0x8f, 0x52, 0xb1, 0x36, 0xb8, 0x7f, 0x79, 0x9b, 0x36, 0x60, 0x38, 0xe2, 0x2d, 0x1d, - 0x9c, 0x4c, 0x94, 0x6c, 0xa9, 0xe2, 0x3c, 0xce, 0xa4, 0xab, 0x14, 0x4a, 0xa7, 0x49, 0x61, 0xa0, - 0x2b, 0x00, 0x6d, 0xee, 0x67, 0xda, 0x0e, 0x69, 0x6d, 0x6f, 0x95, 0xb4, 0xb5, 0xd4, 0x3c, 0x54, - 0x4a, 0x3e, 0xe2, 0xad, 0x8d, 0xfc, 0x45, 0xa7, 0x14, 0x64, 0xfe, 0x4a, 0x0a, 0x2d, 0xd7, 0xb0, - 0x8d, 0x97, 0xa9, 0xe5, 0x36, 0x8c, 0xb5, 0x74, 0xca, 0xff, 0xf5, 0xb3, 0x5b, 0x2b, 0x87, 0x3a, - 0xd5, 0x4c, 0xe6, 0x16, 0x4c, 0x9c, 0xa6, 0x16, 0x11, 0x67, 0x02, 0x7b, 0xc2, 0x9e, 0x80, 0x9a, - 0x90, 0xae, 0xec, 0x88, 0x94, 0x39, 0x5d, 0x99, 0x0c, 0xea, 0x1b, 0xdc, 0xff, 0x50, 0x4d, 0x6b, - 0x55, 0x13, 0x25, 0x34, 0x32, 0x99, 0xa6, 0xc8, 0x96, 0xf4, 0x23, 0x18, 0x91, 0x41, 0x88, 0x9b, - 0xd2, 0x0d, 0xa3, 0xb4, 0xa1, 0x67, 0x7b, 0x3b, 0xd2, 0x56, 0x10, 0xa2, 0x53, 0x04, 0x37, 0xff, - 0x19, 0x87, 0x97, 0xb2, 0x63, 0x6c, 0x26, 0xf3, 0x9d, 0x3e, 0x25, 0x50, 0x4b, 0x26, 0x0e, 0xbd, - 0x59, 0x0c, 0x91, 0xae, 0x03, 0xd5, 0xb8, 0xd8, 0x80, 0x34, 0x27, 0xbf, 0xfe, 0xf3, 0xef, 0xef, - 0x07, 0x26, 0xcc, 0x97, 0xf5, 0xdd, 0xb9, 0x3f, 0x9f, 0x5f, 0xb6, 0x62, 0x89, 0xcc, 0xd2, 0x1f, - 0x08, 0x5c, 0x5b, 0x47, 0x49, 0x27, 0xcf, 0x52, 0x14, 0x93, 0xea, 0xa2, 0x08, 0x8b, 0x1a, 0xc1, - 0xa2, 0xb7, 0xcf, 0x20, 0xd8, 0x47, 0x79, 0x23, 0x1d, 0xdb, 0x47, 0xe5, 0xf2, 0x1d, 0xd3, 0xef, - 0x08, 0x0c, 0xaa, 0xe1, 0x40, 0x6f, 0x9c, 0x65, 0x2b, 0x8d, 0x26, 0x63, 0xe5, 0x42, 0x70, 0x2a, - 0x93, 0xf9, 0xa6, 0x06, 0xbc, 0x49, 0x6f, 0xbc, 0x10, 0x90, 0x7e, 0x45, 0xa0, 0x96, 0x34, 0x62, - 0xb7, 0xaa, 0x55, 0x7e, 0x58, 0xc6, 0xf4, 0xf9, 0x2f, 0x24, 0x3d, 0x9c, 0xa9, 0x32, 0xdb, 0x9f, - 0x2a, 0x3f, 0x11, 0x18, 0x72, 0x50, 0xf5, 0x6e, 0x17, 0x84, 0xca, 0x9c, 0xbc, 0x68, 0xd5, 0x96, - 0x35, 0xdf, 0x3b, 0x46, 0xb3, 0x1f, 
0x3e, 0x3b, 0x56, 0x6c, 0xaa, 0xb3, 0x7e, 0x26, 0x50, 0x77, - 0x30, 0xb9, 0x08, 0xaf, 0x9c, 0xf5, 0x7d, 0xcd, 0xba, 0x64, 0xdc, 0xe9, 0x93, 0x35, 0xc1, 0x53, - 0xb8, 0xcf, 0x08, 0xd4, 0x14, 0x6e, 0x88, 0x57, 0x0e, 0xfb, 0x9e, 0x86, 0xbd, 0x6b, 0x2c, 0xf4, - 0x0d, 0x1b, 0xa2, 0x42, 0x7d, 0x4e, 0x60, 0x78, 0xb3, 0x23, 0x22, 0x64, 0xad, 0x2b, 0x67, 0xbd, - 0xaf, 0x59, 0xdf, 0x35, 0x16, 0xfb, 0x62, 0x15, 0x09, 0x9d, 0x82, 0xfd, 0x85, 0xc0, 0xc8, 0x16, - 0xc6, 0x61, 0xc0, 0xce, 0x19, 0x76, 0x97, 0x8a, 0xbb, 0xa2, 0x71, 0xef, 0x19, 0x6f, 0xf7, 0x85, - 0x2b, 0x33, 0x3e, 0x05, 0xfc, 0xad, 0x1e, 0x3b, 0x4c, 0x5e, 0xf9, 0x60, 0xbe, 0xa5, 0x59, 0xdf, - 0x30, 0x27, 0xce, 0xb2, 0xb6, 0x03, 0xa6, 0x9b, 0xf2, 0x29, 0x81, 0xe1, 0xe4, 0x56, 0x17, 0xdd, - 0x26, 0x74, 0xf1, 0xf7, 0xc4, 0xa0, 0x85, 0x37, 0xbb, 0xdc, 0xcc, 0x75, 0xbd, 0xc1, 0x0a, 0xbd, - 0x7f, 0x7a, 0x83, 0x17, 0x69, 0x11, 0xf1, 0x96, 0xb0, 0x8f, 0xd2, 0x7f, 0x26, 0xc7, 0x76, 0x9b, - 0xfb, 0x62, 0x8e, 0xd0, 0x1f, 0x09, 0x0c, 0x3d, 0x76, 0xa5, 0xb7, 0x7b, 0xb5, 0x17, 0xc5, 0x3d, - 0x4d, 0x7c, 0x87, 0xe6, 0xbf, 0x0c, 0x21, 0x63, 0x74, 0xc3, 0x9e, 0xaa, 0x38, 0x47, 0x3e, 0x58, - 0xfa, 0xed, 0x64, 0x8a, 0xfc, 0x7e, 0x32, 0x45, 0xfe, 0x3a, 0x99, 0x22, 0x9f, 0xde, 0x3e, 0xf7, - 0xbb, 0xb3, 0xcb, 0x87, 0xf2, 0x4e, 0x4d, 0x7f, 0x43, 0x2e, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, - 0xa3, 0x38, 0x06, 0xae, 0x46, 0x0f, 0x00, 0x00, + 0x14, 0xc7, 0x53, 0xfc, 0x18, 0x86, 0x62, 0xc1, 0x58, 0x2a, 0x4e, 0x3a, 0x2c, 0x8b, 0x9d, 0xa8, + 0x84, 0xac, 0xdd, 0x30, 0xb0, 0xba, 0xb2, 0xc1, 0x15, 0xc1, 0xa0, 0x09, 0xd9, 0xdd, 0x34, 0x98, + 0x0d, 0xc6, 0x4b, 0xd1, 0xf3, 0xb6, 0x69, 0xe9, 0xae, 0x6a, 0xbb, 0x6a, 0x20, 0x48, 0x38, 0xe8, + 0x61, 0xe3, 0xcd, 0x83, 0x31, 0xf1, 0x6e, 0x62, 0x36, 0x1b, 0xe3, 0xc9, 0x3f, 0xc2, 0xa3, 0xc6, + 0x7f, 0xc0, 0x10, 0x2f, 0x9e, 0xfd, 0x07, 0x4c, 0x55, 0xff, 0x86, 0x81, 0xcc, 0x08, 0x73, 0xeb, + 0x7a, 0xaf, 0xdf, 0xab, 0x4f, 0x7d, 0xdf, 0x9b, 0x57, 0xd3, 0xd8, 0x74, 0xc3, 0x96, 0x2d, 0x20, + 0x3e, 0x80, 0xd8, 0x3e, 0xe4, 0xf1, 0xfe, 0x93, 0x80, 0x1f, 0xe6, 0x0f, 0x56, 0x14, 0x73, 0xc9, + 0x49, 0x3d, 0x5b, 0x1b, 0x2f, 0x7b, 0xdc, 0xe3, 0xda, 0x68, 0xab, 0xa7, 0xc4, 0x6f, 0x4c, 0x79, + 0x9c, 0x7b, 0x01, 0xd8, 0x34, 0xf2, 0x6d, 0xca, 0x18, 0x97, 0x54, 0xfa, 0x9c, 0x89, 0xd4, 0xbb, + 0xb4, 0x7f, 0x57, 0x58, 0x3e, 0x57, 0xde, 0x90, 0xba, 0x7b, 0x3e, 0x83, 0xf8, 0xc8, 0x8e, 0xf6, + 0x3d, 0x65, 0x10, 0x76, 0x08, 0x92, 0xda, 0x07, 0x0b, 0xb6, 0x07, 0x0c, 0x62, 0x2a, 0xa1, 0x95, + 0x46, 0xad, 0x79, 0xbe, 0xdc, 0x6b, 0xef, 0x5a, 0x2e, 0x0f, 0x6d, 0x1a, 0xeb, 0x4d, 0x3f, 0xd7, + 0x0f, 0x45, 0x68, 0x8e, 0x7b, 0xb0, 0x40, 0x83, 0x68, 0x8f, 0x9e, 0x4f, 0x62, 0x16, 0x5b, 0xdb, + 0x2e, 0x8f, 0xa1, 0xc3, 0x46, 0xe6, 0x3f, 0x03, 0x78, 0x7c, 0xab, 0xbd, 0x1b, 0xfa, 0xf2, 0x61, + 0xa4, 0xb1, 0x09, 0xc1, 0x43, 0x8c, 0x86, 0xd0, 0x40, 0x33, 0x68, 0x76, 0xd4, 0xd1, 0xcf, 0xc4, + 0xc4, 0x37, 0xb2, 0xc0, 0x07, 0xca, 0x37, 0xa0, 0x7d, 0x15, 0x1b, 0x99, 0xc6, 0xd8, 0x67, 0x42, + 0x52, 0xe6, 0xc2, 0xc7, 0xeb, 0x8d, 0x41, 0xfd, 0x46, 0xc9, 0xa2, 0xfc, 0xc0, 0x64, 0x7c, 0x14, + 0x71, 0x9f, 0xc9, 0xc6, 0x50, 0xe2, 0x2f, 0x2c, 0xca, 0x1f, 0xd1, 0x98, 0x86, 0x20, 0x21, 0x16, + 0x8d, 0xe1, 0x99, 0x41, 0xe5, 0x2f, 0x2c, 0xe4, 0x0d, 0x3c, 0xa1, 0x0a, 0xe5, 0xbb, 0xb0, 0xea, + 0xba, 0xbc, 0xcd, 0x64, 0xa3, 0xa6, 0x73, 0x9c, 0xb1, 0x2a, 0xd6, 0xa4, 0xa0, 0xeb, 0xf1, 0x91, + 0xd3, 0x66, 0x8d, 0x91, 0x19, 0x34, 0x5b, 0x77, 0x2a, 0x36, 0x32, 0x89, 0x6b, 0x01, 0xdd, 0x85, + 0x40, 0x34, 0xea, 0x3a, 0x47, 0xba, 0x22, 0x9f, 0xe1, 0x09, 0x7e, 0xc8, 0x20, 0x76, 0xe0, 0x09, + 0xc4, 0xc0, 
0x5c, 0x68, 0x8c, 0xce, 0xa0, 0xd9, 0xb1, 0xe6, 0x92, 0x95, 0x48, 0x69, 0x95, 0xab, + 0x68, 0x45, 0xfb, 0x9e, 0x32, 0x08, 0x4b, 0x55, 0xd1, 0x3a, 0x58, 0xb0, 0x1e, 0x56, 0x62, 0x9d, + 0x33, 0xb9, 0xcc, 0xe7, 0x03, 0xf8, 0x95, 0xc7, 0x69, 0xd5, 0xd6, 0x62, 0xa0, 0x12, 0x1c, 0xf8, + 0xa2, 0x0d, 0x42, 0x92, 0x29, 0x3c, 0xaa, 0x74, 0x16, 0x11, 0x75, 0x33, 0xe1, 0x0b, 0x03, 0xd9, + 0xc1, 0x79, 0x0b, 0x6a, 0xe5, 0xc7, 0x9a, 0x2b, 0x56, 0xd1, 0x1f, 0x56, 0xd6, 0x1f, 0xfa, 0xa1, + 0x80, 0xca, 0xbb, 0x38, 0xeb, 0x0f, 0x2b, 0xdb, 0xdb, 0xc9, 0xd3, 0x91, 0x1d, 0x3c, 0xee, 0x6a, + 0x92, 0xb4, 0xfa, 0xba, 0x6e, 0x63, 0xcd, 0xc5, 0xee, 0xce, 0xbb, 0x56, 0x0e, 0x75, 0xaa, 0x99, + 0xc8, 0x0a, 0x1e, 0x17, 0xe5, 0xc6, 0xd2, 0x25, 0x1f, 0x6b, 0xbe, 0x5a, 0x80, 0x55, 0xfa, 0xce, + 0xa9, 0xbe, 0x6d, 0x3e, 0x43, 0x98, 0x64, 0xc0, 0x1b, 0x20, 0x33, 0xa5, 0x4c, 0x7c, 0x23, 0x8b, + 0x7f, 0x50, 0x74, 0x69, 0xc5, 0x56, 0x55, 0x73, 0xe0, 0xac, 0x9a, 0x8f, 0x30, 0xf6, 0x40, 0x56, + 0xcf, 0x3b, 0xdf, 0xdd, 0x79, 0x37, 0xf2, 0x38, 0xa7, 0x94, 0xc3, 0xfc, 0x06, 0xe1, 0x97, 0x32, + 0xd4, 0x4d, 0x5f, 0xc8, 0xee, 0xaa, 0xba, 0x85, 0xc7, 0x02, 0x5f, 0xe4, 0x20, 0x49, 0x61, 0x17, + 0xba, 0x03, 0xd9, 0x2c, 0x02, 0x9d, 0x72, 0x16, 0xb3, 0x5d, 0x74, 0xd8, 0x27, 0x51, 0xab, 0xd4, + 0x61, 0x57, 0xd7, 0xcd, 0xc0, 0xf5, 0x10, 0x42, 0xee, 0x7f, 0x09, 0x2d, 0xad, 0x5a, 0xdd, 0xc9, + 0xd7, 0xe6, 0x1f, 0xa5, 0x62, 0x6d, 0x72, 0xef, 0xfa, 0x36, 0x6d, 0xe0, 0x91, 0x88, 0xb7, 0x74, + 0x70, 0x32, 0x51, 0xb2, 0xa5, 0x8a, 0x73, 0x39, 0x93, 0x54, 0x29, 0x94, 0x4e, 0x93, 0xc2, 0x40, + 0x56, 0x31, 0x0e, 0xb8, 0x97, 0x69, 0x3b, 0xac, 0xb5, 0x7d, 0xad, 0xa4, 0xad, 0xa5, 0xe6, 0xa1, + 0x52, 0xf2, 0x11, 0x6f, 0x6d, 0xe6, 0x2f, 0x3a, 0xa5, 0x20, 0xf3, 0x57, 0x54, 0x68, 0xb9, 0x0e, + 0x01, 0x5c, 0xa7, 0x96, 0x3b, 0x78, 0xbc, 0xa5, 0x53, 0xfe, 0xaf, 0x9f, 0xdd, 0x7a, 0x39, 0xd4, + 0xa9, 0x66, 0x32, 0xb7, 0xf1, 0xe4, 0x59, 0x6a, 0x11, 0x71, 0x26, 0xa0, 0x2b, 0xec, 0x49, 0x5c, + 0x13, 0x92, 0xca, 0xb6, 0x48, 0x99, 0xd3, 0x95, 0xc9, 0x70, 0x7d, 0x93, 0x7b, 0x1f, 0xaa, 0x69, + 0xad, 0x6a, 0xa2, 0x84, 0x06, 0x26, 0xd3, 0x14, 0xd9, 0x92, 0x7c, 0x84, 0x47, 0xa5, 0x1f, 0xc2, + 0x96, 0xa4, 0x61, 0x94, 0x36, 0xf4, 0x5c, 0x77, 0x47, 0xda, 0xf6, 0x43, 0x70, 0x8a, 0xe0, 0xe6, + 0xbf, 0x13, 0xf8, 0x85, 0xec, 0x18, 0x5b, 0xc9, 0x7c, 0x27, 0x4f, 0x11, 0xae, 0x25, 0x13, 0x87, + 0xdc, 0x2a, 0x86, 0x48, 0xc7, 0x81, 0x6a, 0x5c, 0x6d, 0x40, 0x9a, 0x53, 0x5f, 0xff, 0xf9, 0xf7, + 0x77, 0x03, 0x93, 0xe6, 0x8b, 0xfa, 0xee, 0x3c, 0x58, 0xc8, 0x2f, 0x5b, 0xb1, 0x8c, 0xe6, 0xc8, + 0x0f, 0x08, 0x0f, 0x6e, 0x80, 0x24, 0x53, 0xe7, 0x29, 0x8a, 0x49, 0x75, 0x55, 0x84, 0x25, 0x8d, + 0x60, 0x91, 0xdb, 0xe7, 0x10, 0xec, 0xe3, 0xbc, 0x91, 0x4e, 0xec, 0xe3, 0x72, 0xf9, 0x4e, 0xc8, + 0xb7, 0x08, 0x0f, 0xa9, 0xe1, 0x40, 0x6e, 0x9e, 0x67, 0x2b, 0x8d, 0x26, 0x63, 0xf5, 0x4a, 0x70, + 0x2a, 0x93, 0xf9, 0xba, 0x06, 0xbc, 0x45, 0x6e, 0x5e, 0x0a, 0x48, 0xbe, 0x42, 0xb8, 0x96, 0x34, + 0x62, 0xa7, 0xaa, 0x55, 0x7e, 0x58, 0xc6, 0xcc, 0xc5, 0x2f, 0x24, 0x3d, 0x9c, 0xa9, 0x32, 0xd7, + 0x9b, 0x2a, 0x3f, 0x21, 0x3c, 0xec, 0x80, 0xea, 0xdd, 0x0e, 0x08, 0x95, 0x39, 0x79, 0xd5, 0xaa, + 0xad, 0x68, 0xbe, 0x77, 0x8c, 0x66, 0x2f, 0x7c, 0x76, 0xac, 0xd8, 0x54, 0x67, 0xfd, 0x8c, 0x70, + 0xdd, 0x81, 0xe4, 0x22, 0xec, 0x3b, 0xeb, 0xfb, 0x9a, 0x75, 0xd9, 0xb8, 0xd3, 0x23, 0x6b, 0x82, + 0xa7, 0x70, 0x9f, 0x21, 0x5c, 0x53, 0xb8, 0x21, 0xf4, 0x1d, 0xf6, 0x3d, 0x0d, 0x7b, 0xd7, 0x58, + 0xec, 0x19, 0x36, 0x04, 0x85, 0xfa, 0x1c, 0xe1, 0x91, 0xad, 0xb6, 0x88, 0x80, 0xb5, 0xfa, 0xce, + 0x7a, 0x5f, 0xb3, 0xbe, 0x6b, 0x2c, 
0xf5, 0xc4, 0x2a, 0x12, 0x3a, 0x05, 0xfb, 0x0b, 0xc2, 0xa3, + 0xdb, 0x10, 0x87, 0x3e, 0xbb, 0x60, 0xd8, 0x5d, 0x2b, 0xee, 0xaa, 0xc6, 0xbd, 0x67, 0xbc, 0xdd, + 0x13, 0xae, 0xcc, 0xf8, 0x14, 0xf0, 0xf7, 0x7a, 0xec, 0x30, 0xd9, 0xf7, 0xc1, 0xfc, 0x96, 0x66, + 0x7d, 0xd3, 0x34, 0x2f, 0x67, 0x0d, 0x7c, 0xa6, 0x1b, 0xf4, 0x29, 0xc2, 0x23, 0xc9, 0x0d, 0x2f, + 0x3a, 0x4d, 0xeb, 0xe2, 0xaf, 0x8a, 0x41, 0x0a, 0x6f, 0x76, 0xd1, 0x99, 0x1b, 0x7a, 0xb3, 0x55, + 0x72, 0xff, 0xec, 0x66, 0x97, 0xe9, 0x12, 0xf1, 0x96, 0xb0, 0x8f, 0xd3, 0x7f, 0x29, 0x27, 0x76, + 0xc0, 0x3d, 0x31, 0x8f, 0xc8, 0x8f, 0x08, 0x0f, 0x3f, 0xa6, 0xd2, 0xdd, 0xeb, 0xef, 0xa5, 0x71, + 0x4f, 0x13, 0xdf, 0x21, 0xf9, 0xaf, 0x44, 0xc8, 0x18, 0x68, 0xd8, 0x55, 0x45, 0xe7, 0xd1, 0x07, + 0xcb, 0xbf, 0x9d, 0x4e, 0xa3, 0xdf, 0x4f, 0xa7, 0xd1, 0x5f, 0xa7, 0xd3, 0xe8, 0xd3, 0xdb, 0x17, + 0x7e, 0x83, 0x76, 0xf8, 0x68, 0xde, 0xad, 0xe9, 0xef, 0xc9, 0xc5, 0xff, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x4e, 0x70, 0xc1, 0xe2, 0x52, 0x0f, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/cmd/server/workflow/workflow.pb.gw.go b/cmd/server/workflow/workflow.pb.gw.go index a3cb10e630ac..caa4d174d766 100644 --- a/cmd/server/workflow/workflow.pb.gw.go +++ b/cmd/server/workflow/workflow.pb.gw.go @@ -785,6 +785,24 @@ func request_WorkflowService_Lint_0(ctx context.Context, marshaler runtime.Marsh return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + msg, err := client.Lint(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err @@ -802,6 +820,24 @@ func local_request_WorkflowService_Lint_0(ctx context.Context, marshaler runtime return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["namespace"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace") + } + + protoReq.Namespace, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err) + } + msg, err := server.Lint(ctx, &protoReq) return msg, metadata, err @@ -1454,7 +1490,7 @@ var ( pattern_WorkflowService_Terminate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"api", "v1", "workflows", "namespace", "workflowName", "terminate"}, "", runtime.AssumeColonVerbOpt(true))) - pattern_WorkflowService_Lint_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "workflows", "lint"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_WorkflowService_Lint_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "workflows", "namespace", "lint"}, "", runtime.AssumeColonVerbOpt(true))) pattern_WorkflowService_PodLogs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6, 2, 7}, []string{"api", "v1", 
"workflow", "namespace", "workflowName", "pods", "podName", "logs"}, "", runtime.AssumeColonVerbOpt(true))) diff --git a/cmd/server/workflow/workflow.proto b/cmd/server/workflow/workflow.proto index 715ef07735b9..54420e1e4ae7 100644 --- a/cmd/server/workflow/workflow.proto +++ b/cmd/server/workflow/workflow.proto @@ -130,7 +130,7 @@ service WorkflowService { rpc Lint (WorkflowCreateRequest) returns (github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow) { option (google.api.http) = { - post: "/api/v1/workflows/lint" + post: "/api/v1/workflows/{namespace}/lint" body: "*" }; } diff --git a/cmd/server/workflow/workflow.swagger.json b/cmd/server/workflow/workflow.swagger.json index 3f6df0d8a6ed..600afe3a45c2 100644 --- a/cmd/server/workflow/workflow.swagger.json +++ b/cmd/server/workflow/workflow.swagger.json @@ -194,32 +194,6 @@ ] } }, - "/api/v1/workflows/lint": { - "post": { - "operationId": "Lint", - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/v1alpha1Workflow" - } - } - }, - "parameters": [ - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/workflowWorkflowCreateRequest" - } - } - ], - "tags": [ - "WorkflowService" - ] - } - }, "/api/v1/workflows/{namespace}": { "get": { "operationId": "List", @@ -304,6 +278,38 @@ ] } }, + "/api/v1/workflows/{namespace}/lint": { + "post": { + "operationId": "Lint", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Workflow" + } + } + }, + "parameters": [ + { + "name": "namespace", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/workflowWorkflowCreateRequest" + } + } + ], + "tags": [ + "WorkflowService" + ] + } + }, "/api/v1/workflows/{namespace}/{workflowName}": { "get": { "operationId": "Get", diff --git a/cmd/server/workflow/workflow_server.go b/cmd/server/workflow/workflow_server.go index 05c67844c349..90d39bbae2a6 100644 --- a/cmd/server/workflow/workflow_server.go +++ b/cmd/server/workflow/workflow_server.go @@ -41,9 +41,11 @@ func NewWorkflowServer(namespace string, wfClientset *versioned.Clientset, kubeC wfServer := WorkflowServer{Namespace: namespace, WfClientset: wfClientset, KubeClientset: kubeClientSet, EnableClientAuth: enableClientAuth} var err error if config != nil && config.Persistence != nil { - wfServer.WfDBService.wfDBctx, err = wfServer.CreatePersistenceContext(namespace, kubeClientSet, config.Persistence) + wfServer.WfDBService, err = NewDBService(kubeClientSet, namespace, config.Persistence) + //CreatePersistenceContext(namespace, kubeClientSet, config.Persistence) } if err != nil { + wfServer.WfDBService = nil log.Errorf("Error Creating DB Context. 
%v", err) return nil } @@ -53,7 +55,6 @@ func NewWorkflowServer(namespace string, wfClientset *versioned.Clientset, kubeC func (s *WorkflowServer) CreatePersistenceContext(namespace string, kubeClientSet *kubernetes.Clientset, config *config.PersistConfig) (*sqldb.WorkflowDBContext, error) { var wfDBCtx sqldb.WorkflowDBContext var err error - wfDBCtx.NodeStatusOffload = config.NodeStatusOffload wfDBCtx.Session, wfDBCtx.TableName, err = sqldb.CreateDBSession(kubeClientSet, namespace, config) @@ -177,15 +178,25 @@ func (s *WorkflowServer) List(ctx context.Context, wfReq *WorkflowListRequest) ( } var wfList *v1alpha1.WorkflowList + var listOption v1.ListOptions = v1.ListOptions{} namespace := s.Namespace if wfReq.Namespace != "" { namespace = wfReq.Namespace } + if wfReq.ListOptions != nil { + listOption = *wfReq.ListOptions + } + if s.WfDBService != nil { - wfList, err = s.WfDBService.List(namespace, uint(wfReq.ListOptions.Limit), "") + var pagesize uint = 0 + if wfReq.ListOptions != nil { + pagesize = uint(wfReq.ListOptions.Limit) + } + + wfList, err = s.WfDBService.List(namespace, pagesize, "") } else { - wfList, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).List(v1.ListOptions{}) + wfList, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).List(listOption) } if err != nil { return nil, err diff --git a/persist/sqldb/workflow_repository.go b/persist/sqldb/workflow_repository.go index 6d506915be4d..30c55e347585 100644 --- a/persist/sqldb/workflow_repository.go +++ b/persist/sqldb/workflow_repository.go @@ -82,6 +82,11 @@ func (wdc *WorkflowDBContext) Save(wf *wfv1.Workflow) error { } wfdb, err := convert(wf) + if err != nil { + return err + } + + err = wdc.update(wfdb) if err != nil { if errors.IsCode(CodeDBUpdateRowNotFound, err) { return wdc.insert(wfdb) @@ -91,10 +96,7 @@ func (wdc *WorkflowDBContext) Save(wf *wfv1.Workflow) error { } } - err = wdc.update(wfdb) - if err != nil { - return err - } + log.Info("Workflow update successfully into persistence") return nil @@ -205,8 +207,19 @@ func (wdc *WorkflowDBContext) Query(condition db.Cond, orderBy ...interface{}) ( if wdc.Session == nil { return nil, DBInvalidSession(nil, "DB session is not initialized") } + var err error + //default Orderby + defaultOrderBy:= "-startedat" + if condition != nil && orderBy != nil { + err = wdc.Session.Collection(wdc.TableName).Find(condition).OrderBy(orderBy).All(&wfDBs) + }else if condition != nil && orderBy == nil { + err = wdc.Session.Collection(wdc.TableName).Find(condition).OrderBy(defaultOrderBy).All(&wfDBs) + }else if condition == nil && orderBy != nil { + err = wdc.Session.Collection(wdc.TableName).Find().OrderBy(orderBy).All(&wfDBs) + }else { + err = wdc.Session.Collection(wdc.TableName).Find().OrderBy(defaultOrderBy).All(&wfDBs) + } - err := wdc.Session.Collection(wdc.TableName).Find(condition).OrderBy(orderBy).All(&wfDBs) if err != nil { return nil, DBOperationError(err, "DB Query operation failed") } From 02a818e0133c2b6e526af4a9955e0bafc9750c41 Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian Date: Fri, 13 Dec 2019 15:05:40 -0800 Subject: [PATCH 013/421] updated --- Makefile | 28 ++++++++++++++++++- cmd/server/apiserver/argoserver.go | 1 - cmd/server/workflow/workflow_server.go | 24 ++++++++++------ .../workflow_template_server.go | 8 ++++-- persist/sqldb/workflow_repository.go | 21 +++++++------- pkg/client/clientset/versioned/clientset.go | 1 + 6 files changed, 59 insertions(+), 24 deletions(-) diff --git a/Makefile b/Makefile index 823aba184e05..599e6c0911c3 100644 --- 
a/Makefile
+++ b/Makefile
@@ -46,7 +46,7 @@ endif
 
 # Build the project
 .PHONY: all
-all: cli controller-image executor-image
+all: cli controller-image executor-image argo-server
 
 .PHONY: builder-image
 builder-image:
@@ -114,6 +114,32 @@ else
 endif
 	@if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)argo-server:$(IMAGE_TAG) ; fi
 
+.PHONY: argo-server-linux-amd64
+argo-server-linux-amd64:
+	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argo-server-linux-amd64 ./cmd/server
+
+.PHONY: argo-server-linux-ppc64le
+argo-server-linux-ppc64le:
+	CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argo-server-linux-ppc64le ./cmd/server
+
+.PHONY: argo-server-linux-s390x
+argo-server-linux-s390x:
+	CGO_ENABLED=0 GOOS=linux GOARCH=s390x go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argo-server-linux-s390x ./cmd/server
+
+.PHONY: argo-server-linux
+argo-server-linux: argo-server-linux-amd64 argo-server-linux-ppc64le argo-server-linux-s390x
+
+.PHONY: argo-server-darwin
+argo-server-darwin:
+	CGO_ENABLED=0 GOOS=darwin go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argo-server-darwin-amd64 ./cmd/server
+
+.PHONY: argo-server-windows
+argo-server-windows:
+	CGO_ENABLED=0 GOARCH=amd64 GOOS=windows go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argo-server-windows-amd64 ./cmd/server
+
+
+
+
 .PHONY: executor
 executor:
diff --git a/cmd/server/apiserver/argoserver.go b/cmd/server/apiserver/argoserver.go
index 096ff033e0f5..2803120e1f89 100644
--- a/cmd/server/apiserver/argoserver.go
+++ b/cmd/server/apiserver/argoserver.go
@@ -198,7 +198,6 @@ func mustRegisterGWHandler(register registerFunc, ctx context.Context, mux *runt
 	}
 }
 
-
 // newRedirectServer returns an HTTP server which does a 307 redirect to the HTTPS server
 func newRedirectServer(port int) *http.Server {
 	return &http.Server{
diff --git a/cmd/server/workflow/workflow_server.go b/cmd/server/workflow/workflow_server.go
index 90d39bbae2a6..8be32a2a1381 100644
--- a/cmd/server/workflow/workflow_server.go
+++ b/cmd/server/workflow/workflow_server.go
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"github.com/argoproj/argo/workflow/templateresolution"
 	"strings"
 
 	log "github.com/sirupsen/logrus"
@@ -39,16 +40,17 @@ type WorkflowServer struct {
 
 func NewWorkflowServer(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset, config *config.WorkflowControllerConfig, enableClientAuth bool) *WorkflowServer {
 	wfServer := WorkflowServer{Namespace: namespace, WfClientset: wfClientset, KubeClientset: kubeClientSet, EnableClientAuth: enableClientAuth}
-	var err error
 	if config != nil && config.Persistence != nil {
+		var err error
 		wfServer.WfDBService, err = NewDBService(kubeClientSet, namespace, config.Persistence)
-		//CreatePersistenceContext(namespace, kubeClientSet, config.Persistence)
-	}
-	if err != nil {
-		wfServer.WfDBService = nil
-		log.Errorf("Error Creating DB Context. %v", err)
-		return nil
+		if err != nil {
+			wfServer.WfDBService = nil
+			log.Errorf("Error Creating DB Context. 
%v", err) + }else { + log.Infof("DB Context created successfully") + } } + return &wfServer } @@ -109,6 +111,7 @@ func (s *WorkflowServer) GetWFClient(ctx context.Context) (*versioned.Clientset, func (s *WorkflowServer) Create(ctx context.Context, wfReq *WorkflowCreateRequest) (*v1alpha1.Workflow, error) { wfClient, _, err := s.GetWFClient(ctx) + if err != nil { return nil, err } @@ -127,7 +130,9 @@ func (s *WorkflowServer) Create(ctx context.Context, wfReq *WorkflowCreateReques return nil, err } - err = validate.ValidateWorkflow(wfClient, namespace, wf, validate.ValidateOpts{}) + wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(namespace)) + + err = validate.ValidateWorkflow(wftmplGetter, wf, validate.ValidateOpts{}) if err != nil { return nil, err } @@ -369,8 +374,9 @@ func (s *WorkflowServer) Lint(ctx context.Context, wfReq *WorkflowCreateRequest) if wfReq.Namespace != "" { namespace = wfReq.Namespace } + wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(namespace)) - err = validate.ValidateWorkflow(wfClient, namespace, wfReq.Workflow, validate.ValidateOpts{}) + err = validate.ValidateWorkflow(wftmplGetter, wfReq.Workflow, validate.ValidateOpts{}) if err != nil { return nil, err } diff --git a/cmd/server/workflowtemplate/workflow_template_server.go b/cmd/server/workflowtemplate/workflow_template_server.go index 25815a8f5f3d..2905df26a4eb 100644 --- a/cmd/server/workflowtemplate/workflow_template_server.go +++ b/cmd/server/workflowtemplate/workflow_template_server.go @@ -10,6 +10,7 @@ import ( "github.com/argoproj/argo/pkg/client/clientset/versioned" wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" "github.com/argoproj/argo/workflow/config" + "github.com/argoproj/argo/workflow/templateresolution" "github.com/argoproj/argo/workflow/validate" log "github.com/sirupsen/logrus" "google.golang.org/grpc/metadata" @@ -85,8 +86,10 @@ func (wts *WorkflowTemplateServer) Create(ctx context.Context, wftmplReq *Workfl if wftmplReq.Template == nil { return nil, fmt.Errorf("WorkflowTemplate is not found in Request body") } + wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(namespace)) - err = validate.ValidateWorkflowTemplate(wfClient, namespace, wftmplReq.Template) + + err = validate.ValidateWorkflowTemplate(wftmplGetter, wftmplReq.Template) if err != nil { return nil, fmt.Errorf("Failed to create workflow template: %v", err) } @@ -172,8 +175,9 @@ func (wts *WorkflowTemplateServer) Lint(ctx context.Context, wftmplReq *Workflow if wftmplReq.Namespace != "" { namespace = wftmplReq.Namespace } + wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(namespace)) - err = validate.ValidateWorkflowTemplate(wfClient, namespace, wftmplReq.Template) + err = validate.ValidateWorkflowTemplate(wftmplGetter, wftmplReq.Template) if err != nil { return nil, err } diff --git a/persist/sqldb/workflow_repository.go b/persist/sqldb/workflow_repository.go index 30c55e347585..10403eb7685d 100644 --- a/persist/sqldb/workflow_repository.go +++ b/persist/sqldb/workflow_repository.go @@ -96,8 +96,6 @@ func (wdc *WorkflowDBContext) Save(wf *wfv1.Workflow) error { } } - - log.Info("Workflow update successfully into persistence") return nil } @@ -209,15 +207,16 @@ func (wdc *WorkflowDBContext) Query(condition db.Cond, orderBy ...interface{}) ( } var err error //default 
Orderby - defaultOrderBy:= "-startedat" - if condition != nil && orderBy != nil { - err = wdc.Session.Collection(wdc.TableName).Find(condition).OrderBy(orderBy).All(&wfDBs) - }else if condition != nil && orderBy == nil { - err = wdc.Session.Collection(wdc.TableName).Find(condition).OrderBy(defaultOrderBy).All(&wfDBs) - }else if condition == nil && orderBy != nil { - err = wdc.Session.Collection(wdc.TableName).Find().OrderBy(orderBy).All(&wfDBs) - }else { - err = wdc.Session.Collection(wdc.TableName).Find().OrderBy(defaultOrderBy).All(&wfDBs) + var queryOrderBy []interface{} + queryOrderBy = append(queryOrderBy, "-startedat") + + if orderBy != nil { + queryOrderBy = orderBy + } + if condition != nil { + err = wdc.Session.Collection(wdc.TableName).Find(condition).OrderBy(queryOrderBy).All(&wfDBs) + } else { + err = wdc.Session.Collection(wdc.TableName).Find().OrderBy(queryOrderBy).All(&wfDBs) } if err != nil { diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index d67394f8553a..530f8f837986 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -23,6 +23,7 @@ type Clientset struct { argoprojV1alpha1 *argoprojv1alpha1.ArgoprojV1alpha1Client } + // ArgoprojV1alpha1 retrieves the ArgoprojV1alpha1Client func (c *Clientset) ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface { return c.argoprojV1alpha1 From ec16860662dfca34c9efac4368098973987f4924 Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian Date: Sun, 15 Dec 2019 22:41:30 -0800 Subject: [PATCH 014/421] Aligned import statements --- cmd/server/apiserver/argoserver.go | 29 ++++++++++--------- cmd/server/main.go | 18 +++++++----- cmd/server/workflow/workflow_service.go | 12 ++++---- .../workflow_template_server.go | 13 +++++---- 4 files changed, 40 insertions(+), 32 deletions(-) diff --git a/cmd/server/apiserver/argoserver.go b/cmd/server/apiserver/argoserver.go index 2803120e1f89..22be6b115f57 100644 --- a/cmd/server/apiserver/argoserver.go +++ b/cmd/server/apiserver/argoserver.go @@ -2,30 +2,31 @@ package apiserver import ( "crypto/tls" - "github.com/argoproj/argo/cmd/server/workflow" - "github.com/argoproj/argo/cmd/server/workflowtemplate" - "github.com/argoproj/argo/errors" - "github.com/argoproj/argo/pkg/apiclient" - "github.com/argoproj/argo/pkg/client/clientset/versioned" - "github.com/argoproj/argo/util/json" - "github.com/argoproj/argo/workflow/common" - "github.com/argoproj/argo/workflow/config" + "fmt" + "net" + "net/http" + "time" + golang_proto "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime" log "github.com/sirupsen/logrus" + "github.com/soheilhy/cmux" "golang.org/x/net/context" "google.golang.org/grpc" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - "net" + "k8s.io/client-go/kubernetes" "sigs.k8s.io/yaml" - "fmt" - "github.com/soheilhy/cmux" - "k8s.io/client-go/kubernetes" - "net/http" - "time" + "github.com/argoproj/argo/cmd/server/workflow" + "github.com/argoproj/argo/cmd/server/workflowtemplate" + "github.com/argoproj/argo/errors" + "github.com/argoproj/argo/pkg/apiclient" + "github.com/argoproj/argo/pkg/client/clientset/versioned" + "github.com/argoproj/argo/util/json" + "github.com/argoproj/argo/workflow/common" + "github.com/argoproj/argo/workflow/config" ) type ArgoServer struct { diff --git a/cmd/server/main.go b/cmd/server/main.go index 3b371437b8b6..592dd742f8dd 100644 --- a/cmd/server/main.go +++ 
b/cmd/server/main.go @@ -2,11 +2,10 @@ package main import ( "fmt" - "github.com/argoproj/argo/cmd/server/apiserver" - wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" - cmdutil "github.com/argoproj/argo/util/cmd" - "github.com/argoproj/pkg/cli" - kubecli "github.com/argoproj/pkg/kube/cli" + "os" + "strconv" + "time" + "github.com/argoproj/pkg/stats" "github.com/spf13/cobra" "golang.org/x/net/context" @@ -16,9 +15,12 @@ import ( _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" _ "k8s.io/client-go/plugin/pkg/client/auth/openstack" "k8s.io/client-go/tools/clientcmd" - "os" - "strconv" - "time" + + "github.com/argoproj/argo/cmd/server/apiserver" + wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" + cmdutil "github.com/argoproj/argo/util/cmd" + "github.com/argoproj/pkg/cli" + kubecli "github.com/argoproj/pkg/kube/cli" ) const ( diff --git a/cmd/server/workflow/workflow_service.go b/cmd/server/workflow/workflow_service.go index 09a26a58457c..1e051ceecb97 100644 --- a/cmd/server/workflow/workflow_service.go +++ b/cmd/server/workflow/workflow_service.go @@ -3,17 +3,19 @@ package workflow import ( "encoding/json" "errors" - "github.com/argoproj/argo/cmd/server/common" - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo/pkg/client/clientset/versioned" - wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" - "github.com/argoproj/argo/workflow/util" log "github.com/sirupsen/logrus" "golang.org/x/net/context" "google.golang.org/grpc/metadata" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + + "github.com/argoproj/argo/cmd/server/common" + "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/pkg/client/clientset/versioned" + wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" + "github.com/argoproj/argo/workflow/util" + ) type KubeService struct { diff --git a/cmd/server/workflowtemplate/workflow_template_server.go b/cmd/server/workflowtemplate/workflow_template_server.go index 2905df26a4eb..da19165a5f97 100644 --- a/cmd/server/workflowtemplate/workflow_template_server.go +++ b/cmd/server/workflowtemplate/workflow_template_server.go @@ -5,6 +5,14 @@ import ( "encoding/json" "errors" "fmt" + + + log "github.com/sirupsen/logrus" + "google.golang.org/grpc/metadata" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + common "github.com/argoproj/argo/cmd/server/common" "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo/pkg/client/clientset/versioned" @@ -12,11 +20,6 @@ import ( "github.com/argoproj/argo/workflow/config" "github.com/argoproj/argo/workflow/templateresolution" "github.com/argoproj/argo/workflow/validate" - log "github.com/sirupsen/logrus" - "google.golang.org/grpc/metadata" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" ) type WorkflowTemplateServer struct { From 38d7f88e90b603fb7905bce7ca34e37004039d07 Mon Sep 17 00:00:00 2001 From: Saravanan Balasubramanian Date: Mon, 16 Dec 2019 06:59:04 -0800 Subject: [PATCH 015/421] Fixed test --- cmd/server/workflow/workflow_service_test.go | 175 ------------------- persist/sqldb/mocks/DBRepository.go | 88 +++++++--- 2 files changed, 60 insertions(+), 203 deletions(-) delete mode 100644 cmd/server/workflow/workflow_service_test.go diff --git a/cmd/server/workflow/workflow_service_test.go b/cmd/server/workflow/workflow_service_test.go 
deleted file mode 100644 index 7b7b5ac2c62b..000000000000 --- a/cmd/server/workflow/workflow_service_test.go +++ /dev/null @@ -1,175 +0,0 @@ -package workflow - -import ( - "fmt" - wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - "github.com/stretchr/testify/assert" - "sigs.k8s.io/yaml" - "testing" -) - -var wf = ` -apiVersion: argoproj.io/v1alpha1 -kind: Workflow -metadata: - creationTimestamp: "2019-09-16T22:56:45Z" - generateName: scripts-bash- - generation: 9 - labels: - workflows.argoproj.io/completed: "true" - workflows.argoproj.io/phase: Failed - name: scripts-bash-5ksp4 - namespace: default - resourceVersion: "1414877" - selfLink: /apis/argoproj.io/v1alpha1/namespaces/default/workflows/scripts-bash-5ksp4 - uid: 41a16c4b-d8d5-11e9-8938-025000000001 -spec: - arguments: {} - entrypoint: bash-script-example - templates: - - arguments: {} - inputs: {} - metadata: {} - name: bash-script-example - outputs: {} - steps: - - - arguments: {} - name: generate - template: gen-random-int - - - arguments: - parameters: - - name: message - value: '{{steps.generate.outputs.result}}' - name: print - template: print-message - - arguments: {} - inputs: {} - metadata: {} - name: gen-random-int - outputs: {} - script: - command: - - bash - image: debian:9.4 - name: "" - resources: {} - source: | - cat /dev/urandom | od -N2 -An -i | awk -v f=1 -v r=100 '{printf "%i\n", f + r * $1 / 65536}' - - arguments: {} - container: - args: - - 'echo -e " apiVersion: policy/v1beta1 kind: PodDisruptionBudget metadata: - name: zk-pdb spec: minAvailable: 2 selector: matchLabels: workflows.argoproj.io/workflow: - {{workflow.name}} " | tee pdb.yaml |sleep 120|kubectl create -f pdb.yaml ' - command: - - sh - - -c - image: lachlanevenson/k8s-kubectl - name: "" - resources: {} - inputs: - parameters: - - name: message - metadata: {} - name: print-message - outputs: {} -status: - finishedAt: "2019-09-16T22:58:59Z" - message: child 'scripts-bash-5ksp4-1961198978' failed - nodes: - scripts-bash-5ksp4: - children: - - scripts-bash-5ksp4-2570590690 - displayName: scripts-bash-5ksp4 - finishedAt: "2019-09-16T22:58:59Z" - id: scripts-bash-5ksp4 - message: child 'scripts-bash-5ksp4-1961198978' failed - name: scripts-bash-5ksp4 - outboundNodes: - - scripts-bash-5ksp4-1961198978 - phase: Failed - startedAt: "2019-09-16T22:56:45Z" - templateName: bash-script-example - type: Steps - scripts-bash-5ksp4-315841411: - boundaryID: scripts-bash-5ksp4 - children: - - scripts-bash-5ksp4-3576997567 - displayName: generate - finishedAt: "2019-09-16T22:56:51Z" - id: scripts-bash-5ksp4-315841411 - name: scripts-bash-5ksp4[0].generate - outputs: - result: "50" - phase: Succeeded - startedAt: "2019-09-16T22:56:45Z" - templateName: gen-random-int - type: Pod - scripts-bash-5ksp4-1961198978: - boundaryID: scripts-bash-5ksp4 - displayName: print - finishedAt: "2019-09-16T22:58:58Z" - id: scripts-bash-5ksp4-1961198978 - inputs: - parameters: - - name: message - value: "50" - message: failed with exit code 1 - name: scripts-bash-5ksp4[1].print - phase: Failed - startedAt: "2019-09-16T22:56:53Z" - templateName: print-message - type: Pod - scripts-bash-5ksp4-2570590690: - boundaryID: scripts-bash-5ksp4 - children: - - scripts-bash-5ksp4-315841411 - displayName: '[0]' - finishedAt: "2019-09-16T22:56:53Z" - id: scripts-bash-5ksp4-2570590690 - name: scripts-bash-5ksp4[0] - phase: Succeeded - startedAt: "2019-09-16T22:56:45Z" - templateName: bash-script-example - type: StepGroup - scripts-bash-5ksp4-3576997567: - boundaryID: scripts-bash-5ksp4 - 
children: - - scripts-bash-5ksp4-1961198978 - displayName: '[1]' - finishedAt: "2019-09-16T22:58:59Z" - id: scripts-bash-5ksp4-3576997567 - message: child 'scripts-bash-5ksp4-1961198978' failed - name: scripts-bash-5ksp4[1] - phase: Failed - startedAt: "2019-09-16T22:56:53Z" - templateName: bash-script-example - type: StepGroup - phase: Failed - startedAt: "2019-09-16T22:56:45Z" -` - -func unmarshalWF(yamlStr string) *wfv1.Workflow { - var wf wfv1.Workflow - err := yaml.Unmarshal([]byte(yamlStr), &wf) - if err != nil { - panic(err) - } - return &wf -} - -func TestMarshalling(t *testing.T) { - - workf := unmarshalWF(wf) - - wr := WorkflowResponse{Workflows: workf} - bytes, err := wr.Marshal() - if err != nil { - - } - wr1 := WorkflowResponse{} - wr1.Unmarshal(bytes) - fmt.Println(wr1) - assert.Equal(t, wr, wr1) - -} diff --git a/persist/sqldb/mocks/DBRepository.go b/persist/sqldb/mocks/DBRepository.go index a795e0d8d22d..300cf9a90365 100644 --- a/persist/sqldb/mocks/DBRepository.go +++ b/persist/sqldb/mocks/DBRepository.go @@ -2,9 +2,12 @@ package mocks -import mock "github.com/stretchr/testify/mock" +import ( + mock "github.com/stretchr/testify/mock" + db "upper.io/db.v3" -import v1alpha1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + v1alpha1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" +) // DBRepository is an autogenerated mock type for the DBRepository type type DBRepository struct { @@ -25,6 +28,20 @@ func (_m *DBRepository) Close() error { return r0 } +// Delete provides a mock function with given fields: condition +func (_m *DBRepository) Delete(condition db.Cond) error { + ret := _m.Called(condition) + + var r0 error + if rf, ok := ret.Get(0).(func(db.Cond) error); ok { + r0 = rf(condition) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // Get provides a mock function with given fields: uid func (_m *DBRepository) Get(uid string) (*v1alpha1.Workflow, error) { ret := _m.Called(uid) @@ -48,8 +65,8 @@ func (_m *DBRepository) Get(uid string) (*v1alpha1.Workflow, error) { return r0, r1 } -// IsInterfaceNil provides a mock function with given fields: -func (_m *DBRepository) IsInterfaceNil() bool { +// IsNodeStatusOffload provides a mock function with given fields: +func (_m *DBRepository) IsNodeStatusOffload() bool { ret := _m.Called() var r0 bool @@ -62,27 +79,39 @@ func (_m *DBRepository) IsInterfaceNil() bool { return r0 } -// IsNodeStatusOffload provides a mock function with given fields: -func (_m *DBRepository) IsNodeStatusOffload() bool { - ret := _m.Called() +// List provides a mock function with given fields: orderBy +func (_m *DBRepository) List(orderBy interface{}) (*v1alpha1.WorkflowList, error) { + ret := _m.Called(orderBy) - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() + var r0 *v1alpha1.WorkflowList + if rf, ok := ret.Get(0).(func(interface{}) *v1alpha1.WorkflowList); ok { + r0 = rf(orderBy) } else { - r0 = ret.Get(0).(bool) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1alpha1.WorkflowList) + } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(interface{}) error); ok { + r1 = rf(orderBy) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// List provides a mock function with given fields: -func (_m *DBRepository) List() ([]v1alpha1.Workflow, error) { - ret := _m.Called() +// Query provides a mock function with given fields: condition, orderBy +func (_m *DBRepository) Query(condition db.Cond, orderBy ...interface{}) ([]v1alpha1.Workflow, error) { + var _ca []interface{} + _ca = append(_ca, condition) + _ca 
= append(_ca, orderBy...) + ret := _m.Called(_ca...) var r0 []v1alpha1.Workflow - if rf, ok := ret.Get(0).(func() []v1alpha1.Workflow); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(db.Cond, ...interface{}) []v1alpha1.Workflow); ok { + r0 = rf(condition, orderBy...) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]v1alpha1.Workflow) @@ -90,8 +119,8 @@ func (_m *DBRepository) List() ([]v1alpha1.Workflow, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(db.Cond, ...interface{}) error); ok { + r1 = rf(condition, orderBy...) } else { r1 = ret.Error(1) } @@ -99,22 +128,25 @@ func (_m *DBRepository) List() ([]v1alpha1.Workflow, error) { return r0, r1 } -// Query provides a mock function with given fields: condition -func (_m *DBRepository) Query(condition interface{}) ([]v1alpha1.Workflow, error) { - ret := _m.Called(condition) +// QueryWithPagination provides a mock function with given fields: condition, pageSize, lastID, orderBy +func (_m *DBRepository) QueryWithPagination(condition db.Cond, pageSize uint, lastID string, orderBy ...interface{}) (*v1alpha1.WorkflowList, error) { + var _ca []interface{} + _ca = append(_ca, condition, pageSize, lastID) + _ca = append(_ca, orderBy...) + ret := _m.Called(_ca...) - var r0 []v1alpha1.Workflow - if rf, ok := ret.Get(0).(func(interface{}) []v1alpha1.Workflow); ok { - r0 = rf(condition) + var r0 *v1alpha1.WorkflowList + if rf, ok := ret.Get(0).(func(db.Cond, uint, string, ...interface{}) *v1alpha1.WorkflowList); ok { + r0 = rf(condition, pageSize, lastID, orderBy...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]v1alpha1.Workflow) + r0 = ret.Get(0).(*v1alpha1.WorkflowList) } } var r1 error - if rf, ok := ret.Get(1).(func(interface{}) error); ok { - r1 = rf(condition) + if rf, ok := ret.Get(1).(func(db.Cond, uint, string, ...interface{}) error); ok { + r1 = rf(condition, pageSize, lastID, orderBy...) 
} else { r1 = ret.Error(1) } From 076afb77c897ba6a07d01326ee3a70c85ebc63cf Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Mon, 16 Dec 2019 16:05:44 -0800 Subject: [PATCH 016/421] codegen --- Gopkg.lock | 31 ---------- Makefile | 11 +++- cmd/server/main.go | 7 +-- cmd/server/workflow/workflow.swagger.json | 37 +++++++++++- .../workflow-template.swagger.json | 29 ++++++++++ .../argo-server/argo-server-deployment.yaml | 32 +++++++++++ .../base/argo-server/argo-server-service.yaml | 14 +++++ manifests/base/argo-server/kustomization.yaml | 6 ++ manifests/base/kustomization.yaml | 9 +-- manifests/install.yaml | 57 +++++++++++++++++++ manifests/namespace-install.yaml | 57 +++++++++++++++++++ pkg/client/clientset/versioned/clientset.go | 1 - 12 files changed, 242 insertions(+), 49 deletions(-) create mode 100644 manifests/base/argo-server/argo-server-deployment.yaml create mode 100644 manifests/base/argo-server/argo-server-service.yaml create mode 100644 manifests/base/argo-server/kustomization.yaml diff --git a/Gopkg.lock b/Gopkg.lock index 2d1a2459ec15..bc758d070167 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -55,25 +55,6 @@ pruneopts = "" revision = "de5bf2ad457846296e2031421a34e2568e304e35" -[[projects]] - branch = "master" - digest = "1:a7f619ffc7b99687f9444bd0a07509fec8ae708a7175d234878129896c0918a8" - name = "github.com/alecthomas/template" - packages = [ - ".", - "parse", - ] - pruneopts = "" - revision = "fb15b899a75114aa79cc930e33c46b577cc664b1" - -[[projects]] - branch = "master" - digest = "1:9d943843b71c5d44f184893fcdbe419bf639fee8647ceeca4c7d4fd95923721c" - name = "github.com/alecthomas/units" - packages = ["."] - pruneopts = "" - revision = "f65c72e2690dc4b403c8bd637baf4611cd4c069b" - [[projects]] branch = "master" digest = "1:52905b00a73cda93a2ce8c5fa35185daed673d59e39576e81ad6ab6fb7076b3c" @@ -606,7 +587,6 @@ packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", - "log", "model", ] pruneopts = "" @@ -851,8 +831,6 @@ "cpu", "unix", "windows", - "windows/registry", - "windows/svc/eventlog", ] pruneopts = "" revision = "9109b7679e13aa34a54834cfb4949cac4b96e576" @@ -1003,14 +981,6 @@ revision = "6eaf6f47437a6b4e2153a190160ef39a92c7eceb" version = "v1.23.0" -[[projects]] - digest = "1:15d017551627c8bb091bde628215b2861bed128855343fdd570c62d08871f6e1" - name = "gopkg.in/alecthomas/kingpin.v2" - packages = ["."] - pruneopts = "" - revision = "947dcec5ba9c011838740e680966fd7087a71d0d" - version = "v2.2.6" - [[projects]] digest = "1:75fb3fcfc73a8c723efde7777b40e8e8ff9babf30d8c56160d01beffea8a95a6" name = "gopkg.in/inf.v0" @@ -1530,7 +1500,6 @@ "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/prometheus/client_golang/prometheus/promhttp", - "github.com/prometheus/common/log", "github.com/sirupsen/logrus", "github.com/soheilhy/cmux", "github.com/spf13/cobra", diff --git a/Makefile b/Makefile index fbd26d2fb620..3049cc8f28b7 100644 --- a/Makefile +++ b/Makefile @@ -213,19 +213,24 @@ start-e2e: kubectl -n argo apply --wait --force -f manifests/install.yaml # Ensure that we use the image we're about to create. kubectl -n argo scale deployment/workflow-controller --replicas 0 - # Change to use a "e2e" tag. + kubectl -n argo scale deployment/argo-server --replicas 0 + # Change to use a "dev" tag. 
kubectl -n argo patch deployment/workflow-controller --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/imagePullPolicy", "value": "Never"}]' kubectl -n argo patch deployment/workflow-controller --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "argoproj/workflow-controller:dev"}]' kubectl -n argo patch deployment/workflow-controller --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": ["--loglevel", "debug", "--executor-image", "argoproj/argoexec:dev", "--executor-image-pull-policy", "Never"]}]' + kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "argoproj/workflow-controller:dev"}]' + kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": ["--loglevel", "debug"]}]' # Install MinIO and set-up config-map. kubectl -n argo apply --wait --force -f test/e2e/manifests # Build controller and executor images. - make controller-image executor-image DEV_IMAGE=true IMAGE_PREFIX=argoproj/ IMAGE_TAG=dev + make controller-image executor-image argo-server-image DEV_IMAGE=true IMAGE_PREFIX=argoproj/ IMAGE_TAG=dev # Scale up. kubectl -n argo scale deployment/workflow-controller --replicas 1 + kubectl -n argo scale deployment/argo-server --replicas 1 # Wait for pods to be ready. kubectl -n argo wait --for=condition=Ready pod --all -l app=workflow-controller - kubectl -n argo wait --for=condition=Ready pod --all -l app=minio --timeout=1m + kubectl -n argo wait --for=condition=Ready pod --all -l app=argo-server + kubectl -n argo wait --for=condition=Ready pod --all -l app=minio # Switch to "argo" ns. kubectl config set-context --current --namespace=argo # Pull whalesay. This is used a lot in the tests, so good to have it ready now. diff --git a/cmd/server/main.go b/cmd/server/main.go index 592dd742f8dd..a8f0002837b5 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -25,7 +25,7 @@ import ( const ( // CLIName is the name of the CLI - CLIName = "argo-api-server" + CLIName = "argo-server" ) // NewRootCommand returns an new instance of the workflow-controller main entrypoint @@ -40,7 +40,6 @@ func NewRootCommand() *cobra.Command { var command = cobra.Command{ Use: CLIName, - Short: "argo-api-server is Argo's API server", RunE: func(c *cobra.Command, args []string) error { cli.SetLogLevel(logLevel) stats.RegisterStackDumper() @@ -87,10 +86,10 @@ func NewRootCommand() *cobra.Command { clientConfig = kubecli.AddKubectlFlagsToCmd(&command) command.AddCommand(cmdutil.NewVersionCmd(CLIName)) - command.Flags().IntVar(&port, "port", 8080, "") + command.Flags().IntVarP(&port, "port", "p", 2746 , "Port to listen on") command.Flags().StringVar(&enableClientAuth, "enableClientAuth", "false", "") command.Flags().StringVar(&configMap, "configmap", "workflow-controller-configmap", "Name of K8s configmap to retrieve workflow controller configuration") - command.Flags().StringVar(&logLevel, "loglevel", "debug", "Set the logging level. One of: debug|info|warn|error") + command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. 
One of: debug|info|warn|error") return &command } diff --git a/cmd/server/workflow/workflow.swagger.json b/cmd/server/workflow/workflow.swagger.json index 600afe3a45c2..db21ec3f0627 100644 --- a/cmd/server/workflow/workflow.swagger.json +++ b/cmd/server/workflow/workflow.swagger.json @@ -3292,6 +3292,21 @@ }, "title": "ArtifactoryAuth describes the secret selectors required for authenticating to artifactory" }, + "v1alpha1Backoff": { + "type": "object", + "properties": { + "duration": { + "type": "string" + }, + "factor": { + "type": "integer", + "format": "int32" + }, + "maxDuration": { + "type": "string" + } + } + }, "v1alpha1ContinueOn": { "type": "object", "properties": { @@ -3739,6 +3754,14 @@ "type": "integer", "format": "int32", "title": "Limit is the maximum number of attempts when retrying a container" + }, + "retryPolicy": { + "type": "string", + "title": "RetryPolicy is a policy of NodePhase statuses that will be retried" + }, + "backoff": { + "$ref": "#/definitions/v1alpha1Backoff", + "title": "Backoff is a backoff strategy" } }, "title": "RetryStrategy provides controls on how to retry a workflow step" @@ -3828,6 +3851,12 @@ }, "v1alpha1SuspendTemplate": { "type": "object", + "properties": { + "duration": { + "type": "string", + "title": "Duration is the seconds to wait before automatically resuming a template" + } + }, "title": "SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time" }, "v1alpha1TarStrategy": { @@ -4449,11 +4478,15 @@ }, "storedTemplateID": { "type": "string", - "description": "StoredTemplateID is the ID of stored template." + "description": "StoredTemplateID is the ID of stored template.\nDEPRECATED: This value is not used anymore." }, "workflowTemplateName": { "type": "string", - "description": "WorkflowTemplateName is the WorkflowTemplate resource name on which the resolved template of this node is retrieved." + "description": "WorkflowTemplateName is the WorkflowTemplate resource name on which the resolved template of this node is retrieved.\nDEPRECATED: This value is not used anymore." + }, + "templateScope": { + "type": "string", + "description": "TemplateScope is the template scope in which the template of this node was retrieved." 
}, "phase": { "type": "string", diff --git a/cmd/server/workflowtemplate/workflow-template.swagger.json b/cmd/server/workflowtemplate/workflow-template.swagger.json index 9a51bc36d808..e87a5d4f267a 100644 --- a/cmd/server/workflowtemplate/workflow-template.swagger.json +++ b/cmd/server/workflowtemplate/workflow-template.swagger.json @@ -2689,6 +2689,21 @@ }, "title": "ArtifactoryAuth describes the secret selectors required for authenticating to artifactory" }, + "v1alpha1Backoff": { + "type": "object", + "properties": { + "duration": { + "type": "string" + }, + "factor": { + "type": "integer", + "format": "int32" + }, + "maxDuration": { + "type": "string" + } + } + }, "v1alpha1ContinueOn": { "type": "object", "properties": { @@ -3127,6 +3142,14 @@ "type": "integer", "format": "int32", "title": "Limit is the maximum number of attempts when retrying a container" + }, + "retryPolicy": { + "type": "string", + "title": "RetryPolicy is a policy of NodePhase statuses that will be retried" + }, + "backoff": { + "$ref": "#/definitions/v1alpha1Backoff", + "title": "Backoff is a backoff strategy" } }, "title": "RetryStrategy provides controls on how to retry a workflow step" @@ -3216,6 +3239,12 @@ }, "v1alpha1SuspendTemplate": { "type": "object", + "properties": { + "duration": { + "type": "string", + "title": "Duration is the seconds to wait before automatically resuming a template" + } + }, "title": "SuspendTemplate is a template subtype to suspend a workflow at a predetermined point in time" }, "v1alpha1TarStrategy": { diff --git a/manifests/base/argo-server/argo-server-deployment.yaml b/manifests/base/argo-server/argo-server-deployment.yaml new file mode 100644 index 000000000000..cf1753b57ddc --- /dev/null +++ b/manifests/base/argo-server/argo-server-deployment.yaml @@ -0,0 +1,32 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: argo-server +spec: + selector: + matchLabels: + app: argo-server + replicas: 0 + template: + metadata: + labels: + app: argo-server + spec: + containers: + - name: main + image: argo-server/argo-server:latest + ports: + - containerPort: 2746 + hostPort: 2746 + readinessProbe: + httpGet: + path: /api/v1/workflows/argo + port: 2746 + initialDelaySeconds: 5 + periodSeconds: 10 + livenessProbe: + httpGet: + path: /api/v1/workflows/argo + port: 2746 + initialDelaySeconds: 5 + periodSeconds: 10 \ No newline at end of file diff --git a/manifests/base/argo-server/argo-server-service.yaml b/manifests/base/argo-server/argo-server-service.yaml new file mode 100644 index 000000000000..cc14cee9a022 --- /dev/null +++ b/manifests/base/argo-server/argo-server-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: argo-server + labels: + app: argo-server +spec: + selector: + app: argo-server + ports: + - protocol: TCP + port: 2746 + targetPort: 2746 + diff --git a/manifests/base/argo-server/kustomization.yaml b/manifests/base/argo-server/kustomization.yaml new file mode 100644 index 000000000000..e85cea7e9b2c --- /dev/null +++ b/manifests/base/argo-server/kustomization.yaml @@ -0,0 +1,6 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: +- argo-server-deployment.yaml +- argo-server-service.yaml diff --git a/manifests/base/kustomization.yaml b/manifests/base/kustomization.yaml index dbc178cb7bfc..69f0be5c3ea5 100644 --- a/manifests/base/kustomization.yaml +++ b/manifests/base/kustomization.yaml @@ -1,15 +1,8 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization - -images: -- name: argoproj/argoui - newName: 
argoproj/argoui - newTag: latest -- name: argoproj/workflow-controller - newName: argoproj/workflow-controller - newTag: latest resources: - crds - workflow-controller - argo-ui +- argo-server diff --git a/manifests/install.yaml b/manifests/install.yaml index 589b8beb59d9..4641e1ddefd4 100644 --- a/manifests/install.yaml +++ b/manifests/install.yaml @@ -4,6 +4,16 @@ kind: CustomResourceDefinition metadata: name: workflows.argoproj.io spec: + additionalPrinterColumns: + - JSONPath: .status.phase + description: Status of the workflow + name: Status + type: string + - JSONPath: .status.startedAt + description: When the workflow was started + format: date-time + name: Age + type: date group: argoproj.io names: kind: Workflow @@ -254,6 +264,20 @@ metadata: --- apiVersion: v1 kind: Service +metadata: + labels: + app: argo-server + name: argo-server +spec: + ports: + - port: 2746 + protocol: TCP + targetPort: 2746 + selector: + app: argo-server +--- +apiVersion: v1 +kind: Service metadata: name: argo-ui spec: @@ -265,6 +289,39 @@ spec: --- apiVersion: apps/v1 kind: Deployment +metadata: + name: argo-server +spec: + replicas: 0 + selector: + matchLabels: + app: argo-server + template: + metadata: + labels: + app: argo-server + spec: + containers: + - image: argo-server/argo-server:dev + livenessProbe: + httpGet: + path: /api/v1/workflows/argo + port: 2746 + initialDelaySeconds: 5 + periodSeconds: 10 + name: main + ports: + - containerPort: 2746 + hostPort: 2746 + readinessProbe: + httpGet: + path: /api/v1/workflows/argo + port: 2746 + initialDelaySeconds: 5 + periodSeconds: 10 +--- +apiVersion: apps/v1 +kind: Deployment metadata: name: argo-ui spec: diff --git a/manifests/namespace-install.yaml b/manifests/namespace-install.yaml index 277b9e22f826..22349d7235d3 100644 --- a/manifests/namespace-install.yaml +++ b/manifests/namespace-install.yaml @@ -4,6 +4,16 @@ kind: CustomResourceDefinition metadata: name: workflows.argoproj.io spec: + additionalPrinterColumns: + - JSONPath: .status.phase + description: Status of the workflow + name: Status + type: string + - JSONPath: .status.startedAt + description: When the workflow was started + format: date-time + name: Age + type: date group: argoproj.io names: kind: Workflow @@ -169,6 +179,20 @@ metadata: --- apiVersion: v1 kind: Service +metadata: + labels: + app: argo-server + name: argo-server +spec: + ports: + - port: 2746 + protocol: TCP + targetPort: 2746 + selector: + app: argo-server +--- +apiVersion: v1 +kind: Service metadata: name: argo-ui spec: @@ -180,6 +204,39 @@ spec: --- apiVersion: apps/v1 kind: Deployment +metadata: + name: argo-server +spec: + replicas: 0 + selector: + matchLabels: + app: argo-server + template: + metadata: + labels: + app: argo-server + spec: + containers: + - image: argo-server/argo-server:dev + livenessProbe: + httpGet: + path: /api/v1/workflows/argo + port: 2746 + initialDelaySeconds: 5 + periodSeconds: 10 + name: main + ports: + - containerPort: 2746 + hostPort: 2746 + readinessProbe: + httpGet: + path: /api/v1/workflows/argo + port: 2746 + initialDelaySeconds: 5 + periodSeconds: 10 +--- +apiVersion: apps/v1 +kind: Deployment metadata: name: argo-ui spec: diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index 530f8f837986..d67394f8553a 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -23,7 +23,6 @@ type Clientset struct { argoprojV1alpha1 *argoprojv1alpha1.ArgoprojV1alpha1Client } - // ArgoprojV1alpha1 
retrieves the ArgoprojV1alpha1Client func (c *Clientset) ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface { return c.argoprojV1alpha1 From aec07f5cd769233f046def6a8347bda711f98a78 Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Mon, 16 Dec 2019 16:29:39 -0800 Subject: [PATCH 017/421] fix --- Makefile | 9 ++++--- cmd/server/apiserver/argoserver.go | 2 +- demo.md | 10 ++++++++ .../argo-server/argo-server-deployment.yaml | 25 ++++++++++--------- manifests/base/kustomization.yaml | 7 ++++++ manifests/install.yaml | 14 +---------- manifests/namespace-install.yaml | 14 +---------- 7 files changed, 38 insertions(+), 43 deletions(-) diff --git a/Makefile b/Makefile index 3049cc8f28b7..327f73ff47b4 100644 --- a/Makefile +++ b/Makefile @@ -211,19 +211,20 @@ start-e2e: kubectl create ns argo || true # Install the standard Argo. kubectl -n argo apply --wait --force -f manifests/install.yaml - # Ensure that we use the image we're about to create. + # Scale down in preparation for re-configuration. kubectl -n argo scale deployment/workflow-controller --replicas 0 kubectl -n argo scale deployment/argo-server --replicas 0 - # Change to use a "dev" tag. + # Change to use a "dev" tag and enable debug logging. kubectl -n argo patch deployment/workflow-controller --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/imagePullPolicy", "value": "Never"}]' kubectl -n argo patch deployment/workflow-controller --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "argoproj/workflow-controller:dev"}]' kubectl -n argo patch deployment/workflow-controller --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": ["--loglevel", "debug", "--executor-image", "argoproj/argoexec:dev", "--executor-image-pull-policy", "Never"]}]' - kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "argoproj/workflow-controller:dev"}]' + kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/imagePullPolicy", "value": "Never"}]' + kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "argoproj/argo-server:dev"}]' kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": ["--loglevel", "debug"]}]' # Install MinIO and set-up config-map. kubectl -n argo apply --wait --force -f test/e2e/manifests # Build controller and executor images. - make controller-image executor-image argo-server-image DEV_IMAGE=true IMAGE_PREFIX=argoproj/ IMAGE_TAG=dev + make controller-image argo-server-image executor-image DEV_IMAGE=true IMAGE_PREFIX=argoproj/ IMAGE_TAG=dev # Scale up. 
kubectl -n argo scale deployment/workflow-controller --replicas 1
 	kubectl -n argo scale deployment/argo-server --replicas 1
 
diff --git a/cmd/server/apiserver/argoserver.go b/cmd/server/apiserver/argoserver.go
index 22be6b115f57..0b6de3707e00 100644
--- a/cmd/server/apiserver/argoserver.go
+++ b/cmd/server/apiserver/argoserver.go
@@ -128,7 +128,7 @@ func (as *ArgoServer) Run(ctx context.Context, port int) {
 		go func() { as.checkServeErr("httpsServer", httpsServer.Serve(httpsL)) }()
 		go func() { as.checkServeErr("tlsm", tlsm.Serve()) }()
 	}
-	log.Info("Argo API Server started successfully")
+	log.Infof("Argo Server started successfully on port %v", port)
 	as.stopCh = make(chan struct{})
 	<-as.stopCh
 }
diff --git a/demo.md b/demo.md
index cd77e8bf5989..56f3f508b9b5 100644
--- a/demo.md
+++ b/demo.md
@@ -178,3 +178,13 @@ NOTE: On Minikube, you won't get an external IP after updating the service -- it
 ```
 minikube service -n argo --url argo-ui
 ```
+
+### 9. Access The Argo Server
+
+The Argo Server provides API access to Argo Workflows. It is scaled to zero by default.
+
+```
+kubectl -n argo scale deployment/argo-server --replicas 1
+kubectl -n argo port-forward svc/argo-server 2746:2746
+curl http://127.0.0.1:2746/api/v1/workflows/argo
+```
\ No newline at end of file
diff --git a/manifests/base/argo-server/argo-server-deployment.yaml b/manifests/base/argo-server/argo-server-deployment.yaml
index cf1753b57ddc..0a9ece5afcc3 100644
--- a/manifests/base/argo-server/argo-server-deployment.yaml
+++ b/manifests/base/argo-server/argo-server-deployment.yaml
@@ -18,15 +18,16 @@ spec:
         ports:
         - containerPort: 2746
           hostPort: 2746
-        readinessProbe:
-          httpGet:
-            path: /api/v1/workflows/argo
-            port: 2746
-          initialDelaySeconds: 5
-          periodSeconds: 10
-        livenessProbe:
-          httpGet:
-            path: /api/v1/workflows/argo
-            port: 2746
-          initialDelaySeconds: 5
-          periodSeconds: 10
\ No newline at end of file
+        # TODO - need to fix this
+        #readinessProbe:
+        #  httpGet:
+        #    path: /api/v1/workflows/argo
+        #    port: 2746
+        #  initialDelaySeconds: 5
+        #  periodSeconds: 10
+        #livenessProbe:
+        #  httpGet:
+        #    path: /api/v1/workflows/argo
+        #    port: 2746
+        #  initialDelaySeconds: 5
+        #  periodSeconds: 10
\ No newline at end of file
diff --git a/manifests/base/kustomization.yaml b/manifests/base/kustomization.yaml
index 69f0be5c3ea5..6bc291332d17 100644
--- a/manifests/base/kustomization.yaml
+++ b/manifests/base/kustomization.yaml
@@ -6,3 +6,10 @@ resources:
 - workflow-controller
 - argo-ui
 - argo-server
+images:
+- name: argoproj/argoui
+  newName: argoproj/argoui
+  newTag: latest
+- name: argoproj/workflow-controller
+  newName: argoproj/workflow-controller
+  newTag: latest
diff --git a/manifests/install.yaml b/manifests/install.yaml
index 4641e1ddefd4..abc378c816ff 100644
--- a/manifests/install.yaml
+++ b/manifests/install.yaml
@@ -302,23 +302,11 @@ spec:
         app: argo-server
     spec:
       containers:
-      - image: argo-server/argo-server:dev
-        livenessProbe:
-          httpGet:
-            path: /api/v1/workflows/argo
-            port: 2746
-          initialDelaySeconds: 5
-          periodSeconds: 10
+      - image: argo-server/argo-server:latest
        name: main
         ports:
         - containerPort: 2746
          hostPort: 2746
-        readinessProbe:
-          httpGet:
-            path: /api/v1/workflows/argo
-            port: 2746
-          initialDelaySeconds: 5
-          periodSeconds: 10
 ---
 apiVersion: apps/v1
 kind: Deployment
diff --git a/manifests/namespace-install.yaml b/manifests/namespace-install.yaml
index 22349d7235d3..df60a8e92823 100644
--- a/manifests/namespace-install.yaml
+++ b/manifests/namespace-install.yaml
@@ -217,23 +217,11 @@ spec:
         app: argo-server
     spec: 
containers: - - image: argo-server/argo-server:dev - livenessProbe: - httpGet: - path: /api/v1/workflows/argo - port: 2746 - initialDelaySeconds: 5 - periodSeconds: 10 + - image: argo-server/argo-server:latest name: main ports: - containerPort: 2746 hostPort: 2746 - readinessProbe: - httpGet: - path: /api/v1/workflows/argo - port: 2746 - initialDelaySeconds: 5 - periodSeconds: 10 --- apiVersion: apps/v1 kind: Deployment From 659cb92d5d1214688ac8d40a64e8b997443ad722 Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Mon, 16 Dec 2019 21:14:17 -0800 Subject: [PATCH 018/421] changes --- Gopkg.lock | 17 + Makefile | 9 +- cmd/server/apiserver/argoserver.go | 64 +- cmd/server/main.go | 23 +- cmd/server/workflow/workflow.pb.go | 843 ++---------------- cmd/server/workflow/workflow.proto | 13 - cmd/server/workflow/workflow.swagger.json | 39 - cmd/server/workflow/workflow_server.go | 152 +--- cmd/server/workflow/workflow_service.go | 33 +- .../workflow_template_server.go | 38 +- pkg/apiclient/apiclient.go | 4 - test/e2e/argo_server_test.go | 94 ++ util/grpc/interceptor.go | 36 + 13 files changed, 367 insertions(+), 998 deletions(-) create mode 100644 test/e2e/argo_server_test.go create mode 100644 util/grpc/interceptor.go diff --git a/Gopkg.lock b/Gopkg.lock index bc758d070167..cb0a08c631f3 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -370,6 +370,21 @@ revision = "c3e18be99d19e6b3e8f1559eea2c161a665c4b6b" version = "v1.4.1" +[[projects]] + digest = "1:cbea643bd7f1c76bb6e48ab08f3dd01456602ab7b252f3c85133d7a1a6413a18" + name = "github.com/grpc-ecosystem/go-grpc-middleware" + packages = [ + ".", + "logging", + "logging/logrus", + "logging/logrus/ctxlogrus", + "tags", + "tags/logrus", + ] + pruneopts = "" + revision = "dd15ed025b6054e5253963e355991f3070d4e593" + version = "v1.1.0" + [[projects]] digest = "1:0ebfd2f00a84ee4fb31913b49011b7fa2fb6b12040991d8b948db821a15f7f77" name = "github.com/grpc-ecosystem/grpc-gateway" @@ -1494,6 +1509,8 @@ "github.com/golang/glog", "github.com/golang/protobuf/proto", "github.com/gorilla/websocket", + "github.com/grpc-ecosystem/go-grpc-middleware", + "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus", "github.com/grpc-ecosystem/grpc-gateway/runtime", "github.com/grpc-ecosystem/grpc-gateway/utilities", "github.com/mitchellh/go-ps", diff --git a/Makefile b/Makefile index 327f73ff47b4..4b904b68b910 100644 --- a/Makefile +++ b/Makefile @@ -220,7 +220,7 @@ start-e2e: kubectl -n argo patch deployment/workflow-controller --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": ["--loglevel", "debug", "--executor-image", "argoproj/argoexec:dev", "--executor-image-pull-policy", "Never"]}]' kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/imagePullPolicy", "value": "Never"}]' kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "argoproj/argo-server:dev"}]' - kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": ["--loglevel", "debug"]}]' + kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": ["--loglevel", "debug", "--insecure"]}]' # Install MinIO and set-up config-map. kubectl -n argo apply --wait --force -f test/e2e/manifests # Build controller and executor images. 
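The Gopkg.lock hunk above pulls in github.com/grpc-ecosystem/go-grpc-middleware, and the argoserver.go changes below chain its logrus interceptor with a new panic logger from util/grpc/interceptor.go. That file's body is not shown in this series, so the following is only a minimal sketch of what a recover-and-log unary interceptor conventionally looks like, written against the real grpc.UnaryServerInterceptor signature; the log message and the choice of codes.Internal are assumptions, not the committed implementation.

```
// Sketch only: the committed util/grpc/interceptor.go is not shown in this patch.
package grpc

import (
	"context"

	"github.com/sirupsen/logrus"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// PanicLoggerUnaryServerInterceptor recovers from panics raised by unary
// handlers, logs the panic with the failing method name, and returns an
// Internal gRPC error instead of letting the server process crash.
func PanicLoggerUnaryServerInterceptor(log *logrus.Entry) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
		defer func() {
			if r := recover(); r != nil {
				log.Errorf("recovered from panic in %s: %v", info.FullMethod, r)
				// Overwrite the named return so the client sees a clean error.
				err = status.Errorf(codes.Internal, "%v", r)
			}
		}()
		return handler(ctx, req)
	}
}
```

Because the panic logger is chained after grpc_logrus in ChainUnaryServer, it recovers closest to the handler and converts the panic into an error that the logging interceptor then records like any other failed call.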
@@ -232,6 +232,11 @@ start-e2e: kubectl -n argo wait --for=condition=Ready pod --all -l app=workflow-controller kubectl -n argo wait --for=condition=Ready pod --all -l app=argo-server kubectl -n argo wait --for=condition=Ready pod --all -l app=minio + # Set-up port-forwards + killall kubectl || true + kubectl -n argo port-forward deployment/argo-ui 8001:8001 & + kubectl -n argo port-forward svc/minio 9000:9000 & + kubectl -n argo port-forward svc/argo-server 2746:2746 & # Switch to "argo" ns. kubectl config set-context --current --namespace=argo # Pull whalesay. This is used a lot in the tests, so good to have it ready now. @@ -239,7 +244,7 @@ start-e2e: .PHONY: logs-e2e logs-e2e: - kubectl -n argo get pods -l app=workflow-controller -o name | xargs kubectl -n argo logs -f + kubectl -n argo logs -f -l app .PHONY: test-e2e test-e2e: diff --git a/cmd/server/apiserver/argoserver.go b/cmd/server/apiserver/argoserver.go index 0b6de3707e00..8d84edfa6b7b 100644 --- a/cmd/server/apiserver/argoserver.go +++ b/cmd/server/apiserver/argoserver.go @@ -8,6 +8,8 @@ import ( "time" golang_proto "github.com/golang/protobuf/proto" + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus" "github.com/grpc-ecosystem/grpc-gateway/runtime" log "github.com/sirupsen/logrus" "github.com/soheilhy/cmux" @@ -24,18 +26,20 @@ import ( "github.com/argoproj/argo/errors" "github.com/argoproj/argo/pkg/apiclient" "github.com/argoproj/argo/pkg/client/clientset/versioned" + grpcutil "github.com/argoproj/argo/util/grpc" "github.com/argoproj/argo/util/json" "github.com/argoproj/argo/workflow/common" "github.com/argoproj/argo/workflow/config" ) type ArgoServer struct { - Namespace string - KubeClientset *kubernetes.Clientset - WfClientSet *versioned.Clientset - EnableClientAuth bool - Config *config.WorkflowControllerConfig - ConfigName string + namespace string + kubeClientset *kubernetes.Clientset + wfClientSet *versioned.Clientset + enableClientAuth bool + insecure bool + config *config.WorkflowControllerConfig + configName string stopCh chan struct{} } @@ -50,11 +54,12 @@ type ArgoServerOpts struct { func NewArgoServer(opts ArgoServerOpts) *ArgoServer { return &ArgoServer{ - Namespace: opts.Namespace, - WfClientSet: opts.WfClientSet, - KubeClientset: opts.KubeClientset, - EnableClientAuth: opts.EnableClientAuth, - ConfigName: opts.ConfigName, + namespace: opts.Namespace, + wfClientSet: opts.WfClientSet, + kubeClientset: opts.KubeClientset, + enableClientAuth: opts.EnableClientAuth, + insecure: opts.Insecure, + configName: opts.ConfigName, } } @@ -66,7 +71,7 @@ var backoff = wait.Backoff{ } func (as *ArgoServer) useTLS() bool { - return false + return !as.insecure } func (as *ArgoServer) Run(ctx context.Context, port int) { @@ -134,6 +139,7 @@ func (as *ArgoServer) Run(ctx context.Context, port int) { } func (as *ArgoServer) newGRPCServer() *grpc.Server { + serverLog := log.NewEntry(log.StandardLogger()) sOpts := []grpc.ServerOption{ // Set both the send and receive the bytes limit to be 100MB // The proper way to achieve high performance is to have pagination @@ -141,18 +147,26 @@ func (as *ArgoServer) newGRPCServer() *grpc.Server { grpc.MaxRecvMsgSize(apiclient.MaxGRPCMessageSize), grpc.MaxSendMsgSize(apiclient.MaxGRPCMessageSize), grpc.ConnectionTimeout(300 * time.Second), + grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( + grpc_logrus.UnaryServerInterceptor(serverLog), + grpcutil.PanicLoggerUnaryServerInterceptor(serverLog), + )), + 
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( + grpc_logrus.StreamServerInterceptor(serverLog), + grpcutil.PanicLoggerStreamServerInterceptor(serverLog), + )), } grpcServer := grpc.NewServer(sOpts...) - configMap, err := as.RsyncConfig(as.Namespace, as.WfClientSet, as.KubeClientset) + configMap, err := as.RsyncConfig(as.namespace, as.wfClientSet, as.kubeClientset) if err != nil { // TODO: this currently returns an error every time log.Errorf("Error marshalling config map: %s", err) } - workflowServer := workflow.NewWorkflowServer(as.Namespace, as.WfClientSet, as.KubeClientset, configMap, as.EnableClientAuth) + workflowServer := workflow.NewWorkflowServer(as.namespace, as.wfClientSet, as.kubeClientset, configMap, as.enableClientAuth) workflow.RegisterWorkflowServiceServer(grpcServer, workflowServer) - workflowTemplateServer := workflowtemplate.NewWorkflowTemplateServer(as.Namespace, as.WfClientSet, as.KubeClientset, configMap, as.EnableClientAuth) + workflowTemplateServer := workflowtemplate.NewWorkflowTemplateServer(as.namespace, as.wfClientSet, as.kubeClientset, configMap, as.enableClientAuth) workflowtemplate.RegisterWorkflowTemplateServiceServer(grpcServer, workflowTemplateServer) return grpcServer @@ -160,7 +174,7 @@ func (as *ArgoServer) newGRPCServer() *grpc.Server { // newHTTPServer returns the HTTP server to serve HTTP/HTTPS requests. This is implemented // using grpc-gateway as a proxy to the gRPC server. -func (a *ArgoServer) newHTTPServer(ctx context.Context, port int) *http.Server { +func (as *ArgoServer) newHTTPServer(ctx context.Context, port int) *http.Server { endpoint := fmt.Sprintf("localhost:%d", port) mux := http.NewServeMux() @@ -181,7 +195,7 @@ func (a *ArgoServer) newHTTPServer(ctx context.Context, port int) *http.Server { // time.Time, but does not support custom UnmarshalJSON() and MarshalJSON() methods. Therefore // we use our own Marshaler gwMuxOpts := runtime.WithMarshalerOption(runtime.MIMEWildcard, new(json.JSONMarshaler)) - gwCookieOpts := runtime.WithForwardResponseOption(a.translateGrpcCookieHeader) + gwCookieOpts := runtime.WithForwardResponseOption(as.translateGrpcCookieHeader) gwmux := runtime.NewServeMux(gwMuxOpts, gwCookieOpts) mustRegisterGWHandler(workflow.RegisterWorkflowServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) mustRegisterGWHandler(workflowtemplate.RegisterWorkflowTemplateServiceHandlerFromEndpoint, ctx, gwmux, endpoint, dialOpts) @@ -214,25 +228,25 @@ func newRedirectServer(port int) *http.Server { } // TranslateGrpcCookieHeader conditionally sets a cookie on the response. -func (a *ArgoServer) translateGrpcCookieHeader(ctx context.Context, w http.ResponseWriter, resp golang_proto.Message) error { - +func (as *ArgoServer) translateGrpcCookieHeader(ctx context.Context, w http.ResponseWriter, resp golang_proto.Message) error { + // TODO - what is the point of this func? 
return nil } // ResyncConfig reloads the controller config from the configmap -func (a *ArgoServer) RsyncConfig(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset) (*config.WorkflowControllerConfig, error) { +func (as *ArgoServer) RsyncConfig(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset) (*config.WorkflowControllerConfig, error) { cmClient := kubeClientSet.CoreV1().ConfigMaps(namespace) cm, err := cmClient.Get("workflow-controller-configmap", metav1.GetOptions{}) if err != nil { return nil, errors.InternalWrapError(err) } - return a.UpdateConfig(cm) + return as.UpdateConfig(cm) } -func (a *ArgoServer) UpdateConfig(cm *apiv1.ConfigMap) (*config.WorkflowControllerConfig, error) { +func (as *ArgoServer) UpdateConfig(cm *apiv1.ConfigMap) (*config.WorkflowControllerConfig, error) { configStr, ok := cm.Data[common.WorkflowControllerConfigMapKey] if !ok { - return nil, errors.InternalErrorf("ConfigMap '%s' does not have key '%s'", a.ConfigName, common.WorkflowControllerConfigMapKey) + return nil, errors.InternalErrorf("ConfigMap '%s' does not have key '%s'", as.configName, common.WorkflowControllerConfigMapKey) } var config config.WorkflowControllerConfig log.Infof("Config Map: %s", configStr) @@ -244,9 +258,9 @@ func (a *ArgoServer) UpdateConfig(cm *apiv1.ConfigMap) (*config.WorkflowControll } // checkServeErr checks the error from a .Serve() call to decide if it was a graceful shutdown -func (a *ArgoServer) checkServeErr(name string, err error) { +func (as *ArgoServer) checkServeErr(name string, err error) { if err != nil { - if a.stopCh == nil { + if as.stopCh == nil { // a nil stopCh indicates a graceful shutdown log.Infof("graceful shutdown %s: %v", name, err) } else { diff --git a/cmd/server/main.go b/cmd/server/main.go index a8f0002837b5..2e38ee4ef3c9 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -3,9 +3,10 @@ package main import ( "fmt" "os" - "strconv" "time" + "github.com/argoproj/pkg/cli" + kubecli "github.com/argoproj/pkg/kube/cli" "github.com/argoproj/pkg/stats" "github.com/spf13/cobra" "golang.org/x/net/context" @@ -19,8 +20,6 @@ import ( "github.com/argoproj/argo/cmd/server/apiserver" wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" cmdutil "github.com/argoproj/argo/util/cmd" - "github.com/argoproj/pkg/cli" - kubecli "github.com/argoproj/pkg/kube/cli" ) const ( @@ -33,13 +32,14 @@ func NewRootCommand() *cobra.Command { var ( clientConfig clientcmd.ClientConfig logLevel string // --loglevel - enableClientAuth string + enableClientAuth bool + insecure bool configMap string port int ) var command = cobra.Command{ - Use: CLIName, + Use: CLIName, RunE: func(c *cobra.Command, args []string) error { cli.SetLogLevel(logLevel) stats.RegisterStackDumper() @@ -63,16 +63,12 @@ func NewRootCommand() *cobra.Command { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - clientAuth, err := strconv.ParseBool(enableClientAuth) - if err != nil { - return err - } - opts := apiserver.ArgoServerOpts{ Namespace: namespace, WfClientSet: wflientset, KubeClientset: kubeConfig, - EnableClientAuth: clientAuth, + EnableClientAuth: enableClientAuth, + Insecure: insecure, } apiServer := apiserver.NewArgoServer(opts) @@ -86,8 +82,9 @@ func NewRootCommand() *cobra.Command { clientConfig = kubecli.AddKubectlFlagsToCmd(&command) command.AddCommand(cmdutil.NewVersionCmd(CLIName)) - command.Flags().IntVarP(&port, "port", "p", 2746 , "Port to listen on") - 
command.Flags().StringVar(&enableClientAuth, "enableClientAuth", "false", "") + command.Flags().IntVarP(&port, "port", "p", 2746, "Port to listen on") + command.Flags().BoolVar(&enableClientAuth, "enable-client-auth", false, "Enable client auth") + command.Flags().BoolVar(&insecure, "insecure", false, "Insecure") command.Flags().StringVar(&configMap, "configmap", "workflow-controller-configmap", "Name of K8s configmap to retrieve workflow controller configuration") command.Flags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error") return &command diff --git a/cmd/server/workflow/workflow.pb.go b/cmd/server/workflow/workflow.pb.go index b548fff74506..742e029d113e 100644 --- a/cmd/server/workflow/workflow.pb.go +++ b/cmd/server/workflow/workflow.pb.go @@ -35,122 +35,10 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -type SubmitOptions struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - GenerateName string `protobuf:"bytes,2,opt,name=generateName,proto3" json:"generateName,omitempty"` - InstanceID string `protobuf:"bytes,3,opt,name=instanceID,proto3" json:"instanceID,omitempty"` - Entrypoint string `protobuf:"bytes,4,opt,name=entrypoint,proto3" json:"entrypoint,omitempty"` - Parameters []string `protobuf:"bytes,5,rep,name=parameters,proto3" json:"parameters,omitempty"` - ServiceAccount string `protobuf:"bytes,6,opt,name=serviceAccount,proto3" json:"serviceAccount,omitempty"` - ServerDryRun bool `protobuf:"varint,7,opt,name=serverDryRun,proto3" json:"serverDryRun,omitempty"` - Labels string `protobuf:"bytes,8,opt,name=labels,proto3" json:"labels,omitempty"` - OwnerReference *v1.OwnerReference `protobuf:"bytes,9,opt,name=ownerReference,proto3" json:"ownerReference,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SubmitOptions) Reset() { *m = SubmitOptions{} } -func (m *SubmitOptions) String() string { return proto.CompactTextString(m) } -func (*SubmitOptions) ProtoMessage() {} -func (*SubmitOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{0} -} -func (m *SubmitOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SubmitOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SubmitOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SubmitOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubmitOptions.Merge(m, src) -} -func (m *SubmitOptions) XXX_Size() int { - return m.Size() -} -func (m *SubmitOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SubmitOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SubmitOptions proto.InternalMessageInfo - -func (m *SubmitOptions) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *SubmitOptions) GetGenerateName() string { - if m != nil { - return m.GenerateName - } - return "" -} - -func (m *SubmitOptions) GetInstanceID() string { - if m != nil { - return m.InstanceID - } - return "" -} - -func (m *SubmitOptions) GetEntrypoint() string { - if m != nil { - return m.Entrypoint - } - return "" -} - -func (m *SubmitOptions) GetParameters() []string { - if m != nil { - return m.Parameters - } - return nil -} - -func (m 
*SubmitOptions) GetServiceAccount() string { - if m != nil { - return m.ServiceAccount - } - return "" -} - -func (m *SubmitOptions) GetServerDryRun() bool { - if m != nil { - return m.ServerDryRun - } - return false -} - -func (m *SubmitOptions) GetLabels() string { - if m != nil { - return m.Labels - } - return "" -} - -func (m *SubmitOptions) GetOwnerReference() *v1.OwnerReference { - if m != nil { - return m.OwnerReference - } - return nil -} - type WorkflowCreateRequest struct { Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` Workflow *v1alpha1.Workflow `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` CreateOptions *v1.CreateOptions `protobuf:"bytes,3,opt,name=createOptions,proto3" json:"createOptions,omitempty"` - SubmitOptions *SubmitOptions `protobuf:"bytes,4,opt,name=submitOptions,proto3" json:"submitOptions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -160,7 +48,7 @@ func (m *WorkflowCreateRequest) Reset() { *m = WorkflowCreateRequest{} } func (m *WorkflowCreateRequest) String() string { return proto.CompactTextString(m) } func (*WorkflowCreateRequest) ProtoMessage() {} func (*WorkflowCreateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{1} + return fileDescriptor_192bc67c39cca05a, []int{0} } func (m *WorkflowCreateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -210,13 +98,6 @@ func (m *WorkflowCreateRequest) GetCreateOptions() *v1.CreateOptions { return nil } -func (m *WorkflowCreateRequest) GetSubmitOptions() *SubmitOptions { - if m != nil { - return m.SubmitOptions - } - return nil -} - type WorkflowGetRequest struct { WorkflowName string `protobuf:"bytes,1,opt,name=workflowName,proto3" json:"workflowName,omitempty"` Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` @@ -230,7 +111,7 @@ func (m *WorkflowGetRequest) Reset() { *m = WorkflowGetRequest{} } func (m *WorkflowGetRequest) String() string { return proto.CompactTextString(m) } func (*WorkflowGetRequest) ProtoMessage() {} func (*WorkflowGetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{2} + return fileDescriptor_192bc67c39cca05a, []int{1} } func (m *WorkflowGetRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -292,7 +173,7 @@ func (m *WorkflowListRequest) Reset() { *m = WorkflowListRequest{} } func (m *WorkflowListRequest) String() string { return proto.CompactTextString(m) } func (*WorkflowListRequest) ProtoMessage() {} func (*WorkflowListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{3} + return fileDescriptor_192bc67c39cca05a, []int{2} } func (m *WorkflowListRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -348,7 +229,7 @@ func (m *WorkflowUpdateRequest) Reset() { *m = WorkflowUpdateRequest{} } func (m *WorkflowUpdateRequest) String() string { return proto.CompactTextString(m) } func (*WorkflowUpdateRequest) ProtoMessage() {} func (*WorkflowUpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{4} + return fileDescriptor_192bc67c39cca05a, []int{3} } func (m *WorkflowUpdateRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +294,7 @@ func (m *WorkflowLogRequest) Reset() { *m = WorkflowLogRequest{} } func (m *WorkflowLogRequest) String() string { return proto.CompactTextString(m) } func (*WorkflowLogRequest) 
ProtoMessage() {} func (*WorkflowLogRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{5} + return fileDescriptor_192bc67c39cca05a, []int{4} } func (m *WorkflowLogRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -490,7 +371,7 @@ func (m *WorkflowDeleteRequest) Reset() { *m = WorkflowDeleteRequest{} } func (m *WorkflowDeleteRequest) String() string { return proto.CompactTextString(m) } func (*WorkflowDeleteRequest) ProtoMessage() {} func (*WorkflowDeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{6} + return fileDescriptor_192bc67c39cca05a, []int{5} } func (m *WorkflowDeleteRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -552,7 +433,7 @@ func (m *WorkflowDeleteResponse) Reset() { *m = WorkflowDeleteResponse{} func (m *WorkflowDeleteResponse) String() string { return proto.CompactTextString(m) } func (*WorkflowDeleteResponse) ProtoMessage() {} func (*WorkflowDeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{7} + return fileDescriptor_192bc67c39cca05a, []int{6} } func (m *WorkflowDeleteResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -607,7 +488,7 @@ func (m *LogEntry) Reset() { *m = LogEntry{} } func (m *LogEntry) String() string { return proto.CompactTextString(m) } func (*LogEntry) ProtoMessage() {} func (*LogEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_192bc67c39cca05a, []int{8} + return fileDescriptor_192bc67c39cca05a, []int{7} } func (m *LogEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -651,7 +532,6 @@ func (m *LogEntry) GetTimeStamp() *v1.Time { } func init() { - proto.RegisterType((*SubmitOptions)(nil), "workflow.SubmitOptions") proto.RegisterType((*WorkflowCreateRequest)(nil), "workflow.WorkflowCreateRequest") proto.RegisterType((*WorkflowGetRequest)(nil), "workflow.WorkflowGetRequest") proto.RegisterType((*WorkflowListRequest)(nil), "workflow.WorkflowListRequest") @@ -665,77 +545,67 @@ func init() { func init() { proto.RegisterFile("cmd/server/workflow/workflow.proto", fileDescriptor_192bc67c39cca05a) } var fileDescriptor_192bc67c39cca05a = []byte{ - // 1113 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x97, 0xcf, 0x4f, 0x24, 0x45, - 0x14, 0xc7, 0x53, 0xfc, 0x18, 0x86, 0x62, 0xc1, 0x58, 0x2a, 0x4e, 0x3a, 0x2c, 0x8b, 0x9d, 0xa8, - 0x84, 0xac, 0xdd, 0x30, 0xb0, 0xba, 0xb2, 0xc1, 0x15, 0xc1, 0xa0, 0x09, 0xd9, 0xdd, 0x34, 0x98, - 0x0d, 0xc6, 0x4b, 0xd1, 0xf3, 0xb6, 0x69, 0xe9, 0xae, 0x6a, 0xbb, 0x6a, 0x20, 0x48, 0x38, 0xe8, - 0x61, 0xe3, 0xcd, 0x83, 0x31, 0xf1, 0x6e, 0x62, 0x36, 0x1b, 0xe3, 0xc9, 0x3f, 0xc2, 0xa3, 0xc6, - 0x7f, 0xc0, 0x10, 0x2f, 0x9e, 0xfd, 0x07, 0x4c, 0x55, 0xff, 0x86, 0x81, 0xcc, 0x08, 0x73, 0xeb, - 0x7a, 0xaf, 0xdf, 0xab, 0x4f, 0x7d, 0xdf, 0x9b, 0x57, 0xd3, 0xd8, 0x74, 0xc3, 0x96, 0x2d, 0x20, - 0x3e, 0x80, 0xd8, 0x3e, 0xe4, 0xf1, 0xfe, 0x93, 0x80, 0x1f, 0xe6, 0x0f, 0x56, 0x14, 0x73, 0xc9, - 0x49, 0x3d, 0x5b, 0x1b, 0x2f, 0x7b, 0xdc, 0xe3, 0xda, 0x68, 0xab, 0xa7, 0xc4, 0x6f, 0x4c, 0x79, - 0x9c, 0x7b, 0x01, 0xd8, 0x34, 0xf2, 0x6d, 0xca, 0x18, 0x97, 0x54, 0xfa, 0x9c, 0x89, 0xd4, 0xbb, - 0xb4, 0x7f, 0x57, 0x58, 0x3e, 0x57, 0xde, 0x90, 0xba, 0x7b, 0x3e, 0x83, 0xf8, 0xc8, 0x8e, 0xf6, - 0x3d, 0x65, 0x10, 0x76, 0x08, 0x92, 0xda, 0x07, 0x0b, 0xb6, 0x07, 0x0c, 0x62, 0x2a, 0xa1, 0x95, - 0x46, 0xad, 0x79, 0xbe, 0xdc, 0x6b, 0xef, 0x5a, 0x2e, 0x0f, 0x6d, 0x1a, 0xeb, 0x4d, 0x3f, 0xd7, - 0x0f, 0x45, 0x68, 0x8e, 
0x7b, 0xb0, 0x40, 0x83, 0x68, 0x8f, 0x9e, 0x4f, 0x62, 0x16, 0x5b, 0xdb, - 0x2e, 0x8f, 0xa1, 0xc3, 0x46, 0xe6, 0x3f, 0x03, 0x78, 0x7c, 0xab, 0xbd, 0x1b, 0xfa, 0xf2, 0x61, - 0xa4, 0xb1, 0x09, 0xc1, 0x43, 0x8c, 0x86, 0xd0, 0x40, 0x33, 0x68, 0x76, 0xd4, 0xd1, 0xcf, 0xc4, - 0xc4, 0x37, 0xb2, 0xc0, 0x07, 0xca, 0x37, 0xa0, 0x7d, 0x15, 0x1b, 0x99, 0xc6, 0xd8, 0x67, 0x42, - 0x52, 0xe6, 0xc2, 0xc7, 0xeb, 0x8d, 0x41, 0xfd, 0x46, 0xc9, 0xa2, 0xfc, 0xc0, 0x64, 0x7c, 0x14, - 0x71, 0x9f, 0xc9, 0xc6, 0x50, 0xe2, 0x2f, 0x2c, 0xca, 0x1f, 0xd1, 0x98, 0x86, 0x20, 0x21, 0x16, - 0x8d, 0xe1, 0x99, 0x41, 0xe5, 0x2f, 0x2c, 0xe4, 0x0d, 0x3c, 0xa1, 0x0a, 0xe5, 0xbb, 0xb0, 0xea, - 0xba, 0xbc, 0xcd, 0x64, 0xa3, 0xa6, 0x73, 0x9c, 0xb1, 0x2a, 0xd6, 0xa4, 0xa0, 0xeb, 0xf1, 0x91, - 0xd3, 0x66, 0x8d, 0x91, 0x19, 0x34, 0x5b, 0x77, 0x2a, 0x36, 0x32, 0x89, 0x6b, 0x01, 0xdd, 0x85, - 0x40, 0x34, 0xea, 0x3a, 0x47, 0xba, 0x22, 0x9f, 0xe1, 0x09, 0x7e, 0xc8, 0x20, 0x76, 0xe0, 0x09, - 0xc4, 0xc0, 0x5c, 0x68, 0x8c, 0xce, 0xa0, 0xd9, 0xb1, 0xe6, 0x92, 0x95, 0x48, 0x69, 0x95, 0xab, - 0x68, 0x45, 0xfb, 0x9e, 0x32, 0x08, 0x4b, 0x55, 0xd1, 0x3a, 0x58, 0xb0, 0x1e, 0x56, 0x62, 0x9d, - 0x33, 0xb9, 0xcc, 0xe7, 0x03, 0xf8, 0x95, 0xc7, 0x69, 0xd5, 0xd6, 0x62, 0xa0, 0x12, 0x1c, 0xf8, - 0xa2, 0x0d, 0x42, 0x92, 0x29, 0x3c, 0xaa, 0x74, 0x16, 0x11, 0x75, 0x33, 0xe1, 0x0b, 0x03, 0xd9, - 0xc1, 0x79, 0x0b, 0x6a, 0xe5, 0xc7, 0x9a, 0x2b, 0x56, 0xd1, 0x1f, 0x56, 0xd6, 0x1f, 0xfa, 0xa1, - 0x80, 0xca, 0xbb, 0x38, 0xeb, 0x0f, 0x2b, 0xdb, 0xdb, 0xc9, 0xd3, 0x91, 0x1d, 0x3c, 0xee, 0x6a, - 0x92, 0xb4, 0xfa, 0xba, 0x6e, 0x63, 0xcd, 0xc5, 0xee, 0xce, 0xbb, 0x56, 0x0e, 0x75, 0xaa, 0x99, - 0xc8, 0x0a, 0x1e, 0x17, 0xe5, 0xc6, 0xd2, 0x25, 0x1f, 0x6b, 0xbe, 0x5a, 0x80, 0x55, 0xfa, 0xce, - 0xa9, 0xbe, 0x6d, 0x3e, 0x43, 0x98, 0x64, 0xc0, 0x1b, 0x20, 0x33, 0xa5, 0x4c, 0x7c, 0x23, 0x8b, - 0x7f, 0x50, 0x74, 0x69, 0xc5, 0x56, 0x55, 0x73, 0xe0, 0xac, 0x9a, 0x8f, 0x30, 0xf6, 0x40, 0x56, - 0xcf, 0x3b, 0xdf, 0xdd, 0x79, 0x37, 0xf2, 0x38, 0xa7, 0x94, 0xc3, 0xfc, 0x06, 0xe1, 0x97, 0x32, - 0xd4, 0x4d, 0x5f, 0xc8, 0xee, 0xaa, 0xba, 0x85, 0xc7, 0x02, 0x5f, 0xe4, 0x20, 0x49, 0x61, 0x17, - 0xba, 0x03, 0xd9, 0x2c, 0x02, 0x9d, 0x72, 0x16, 0xb3, 0x5d, 0x74, 0xd8, 0x27, 0x51, 0xab, 0xd4, - 0x61, 0x57, 0xd7, 0xcd, 0xc0, 0xf5, 0x10, 0x42, 0xee, 0x7f, 0x09, 0x2d, 0xad, 0x5a, 0xdd, 0xc9, - 0xd7, 0xe6, 0x1f, 0xa5, 0x62, 0x6d, 0x72, 0xef, 0xfa, 0x36, 0x6d, 0xe0, 0x91, 0x88, 0xb7, 0x74, - 0x70, 0x32, 0x51, 0xb2, 0xa5, 0x8a, 0x73, 0x39, 0x93, 0x54, 0x29, 0x94, 0x4e, 0x93, 0xc2, 0x40, - 0x56, 0x31, 0x0e, 0xb8, 0x97, 0x69, 0x3b, 0xac, 0xb5, 0x7d, 0xad, 0xa4, 0xad, 0xa5, 0xe6, 0xa1, - 0x52, 0xf2, 0x11, 0x6f, 0x6d, 0xe6, 0x2f, 0x3a, 0xa5, 0x20, 0xf3, 0x57, 0x54, 0x68, 0xb9, 0x0e, - 0x01, 0x5c, 0xa7, 0x96, 0x3b, 0x78, 0xbc, 0xa5, 0x53, 0xfe, 0xaf, 0x9f, 0xdd, 0x7a, 0x39, 0xd4, - 0xa9, 0x66, 0x32, 0xb7, 0xf1, 0xe4, 0x59, 0x6a, 0x11, 0x71, 0x26, 0xa0, 0x2b, 0xec, 0x49, 0x5c, - 0x13, 0x92, 0xca, 0xb6, 0x48, 0x99, 0xd3, 0x95, 0xc9, 0x70, 0x7d, 0x93, 0x7b, 0x1f, 0xaa, 0x69, - 0xad, 0x6a, 0xa2, 0x84, 0x06, 0x26, 0xd3, 0x14, 0xd9, 0x92, 0x7c, 0x84, 0x47, 0xa5, 0x1f, 0xc2, - 0x96, 0xa4, 0x61, 0x94, 0x36, 0xf4, 0x5c, 0x77, 0x47, 0xda, 0xf6, 0x43, 0x70, 0x8a, 0xe0, 0xe6, - 0xbf, 0x13, 0xf8, 0x85, 0xec, 0x18, 0x5b, 0xc9, 0x7c, 0x27, 0x4f, 0x11, 0xae, 0x25, 0x13, 0x87, - 0xdc, 0x2a, 0x86, 0x48, 0xc7, 0x81, 0x6a, 0x5c, 0x6d, 0x40, 0x9a, 0x53, 0x5f, 0xff, 0xf9, 0xf7, - 0x77, 0x03, 0x93, 0xe6, 0x8b, 0xfa, 0xee, 0x3c, 0x58, 0xc8, 0x2f, 0x5b, 0xb1, 0x8c, 0xe6, 0xc8, - 0x0f, 0x08, 0x0f, 0x6e, 0x80, 0x24, 0x53, 0xe7, 
0x29, 0x8a, 0x49, 0x75, 0x55, 0x84, 0x25, 0x8d, - 0x60, 0x91, 0xdb, 0xe7, 0x10, 0xec, 0xe3, 0xbc, 0x91, 0x4e, 0xec, 0xe3, 0x72, 0xf9, 0x4e, 0xc8, - 0xb7, 0x08, 0x0f, 0xa9, 0xe1, 0x40, 0x6e, 0x9e, 0x67, 0x2b, 0x8d, 0x26, 0x63, 0xf5, 0x4a, 0x70, - 0x2a, 0x93, 0xf9, 0xba, 0x06, 0xbc, 0x45, 0x6e, 0x5e, 0x0a, 0x48, 0xbe, 0x42, 0xb8, 0x96, 0x34, - 0x62, 0xa7, 0xaa, 0x55, 0x7e, 0x58, 0xc6, 0xcc, 0xc5, 0x2f, 0x24, 0x3d, 0x9c, 0xa9, 0x32, 0xd7, - 0x9b, 0x2a, 0x3f, 0x21, 0x3c, 0xec, 0x80, 0xea, 0xdd, 0x0e, 0x08, 0x95, 0x39, 0x79, 0xd5, 0xaa, - 0xad, 0x68, 0xbe, 0x77, 0x8c, 0x66, 0x2f, 0x7c, 0x76, 0xac, 0xd8, 0x54, 0x67, 0xfd, 0x8c, 0x70, - 0xdd, 0x81, 0xe4, 0x22, 0xec, 0x3b, 0xeb, 0xfb, 0x9a, 0x75, 0xd9, 0xb8, 0xd3, 0x23, 0x6b, 0x82, - 0xa7, 0x70, 0x9f, 0x21, 0x5c, 0x53, 0xb8, 0x21, 0xf4, 0x1d, 0xf6, 0x3d, 0x0d, 0x7b, 0xd7, 0x58, - 0xec, 0x19, 0x36, 0x04, 0x85, 0xfa, 0x1c, 0xe1, 0x91, 0xad, 0xb6, 0x88, 0x80, 0xb5, 0xfa, 0xce, - 0x7a, 0x5f, 0xb3, 0xbe, 0x6b, 0x2c, 0xf5, 0xc4, 0x2a, 0x12, 0x3a, 0x05, 0xfb, 0x0b, 0xc2, 0xa3, - 0xdb, 0x10, 0x87, 0x3e, 0xbb, 0x60, 0xd8, 0x5d, 0x2b, 0xee, 0xaa, 0xc6, 0xbd, 0x67, 0xbc, 0xdd, - 0x13, 0xae, 0xcc, 0xf8, 0x14, 0xf0, 0xf7, 0x7a, 0xec, 0x30, 0xd9, 0xf7, 0xc1, 0xfc, 0x96, 0x66, - 0x7d, 0xd3, 0x34, 0x2f, 0x67, 0x0d, 0x7c, 0xa6, 0x1b, 0xf4, 0x29, 0xc2, 0x23, 0xc9, 0x0d, 0x2f, - 0x3a, 0x4d, 0xeb, 0xe2, 0xaf, 0x8a, 0x41, 0x0a, 0x6f, 0x76, 0xd1, 0x99, 0x1b, 0x7a, 0xb3, 0x55, - 0x72, 0xff, 0xec, 0x66, 0x97, 0xe9, 0x12, 0xf1, 0x96, 0xb0, 0x8f, 0xd3, 0x7f, 0x29, 0x27, 0x76, - 0xc0, 0x3d, 0x31, 0x8f, 0xc8, 0x8f, 0x08, 0x0f, 0x3f, 0xa6, 0xd2, 0xdd, 0xeb, 0xef, 0xa5, 0x71, - 0x4f, 0x13, 0xdf, 0x21, 0xf9, 0xaf, 0x44, 0xc8, 0x18, 0x68, 0xd8, 0x55, 0x45, 0xe7, 0xd1, 0x07, - 0xcb, 0xbf, 0x9d, 0x4e, 0xa3, 0xdf, 0x4f, 0xa7, 0xd1, 0x5f, 0xa7, 0xd3, 0xe8, 0xd3, 0xdb, 0x17, - 0x7e, 0x83, 0x76, 0xf8, 0x68, 0xde, 0xad, 0xe9, 0xef, 0xc9, 0xc5, 0xff, 0x02, 0x00, 0x00, 0xff, - 0xff, 0x4e, 0x70, 0xc1, 0xe2, 0x52, 0x0f, 0x00, 0x00, + // 948 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x97, 0xcf, 0x6f, 0xdc, 0x44, + 0x14, 0xc7, 0x35, 0x49, 0xb3, 0xd9, 0xbc, 0x52, 0x10, 0x03, 0x44, 0x2b, 0x2b, 0x4d, 0xc3, 0x48, + 0x88, 0x2a, 0x2a, 0x76, 0x36, 0x49, 0xa1, 0xa4, 0x2a, 0x25, 0xa4, 0x28, 0x1c, 0x56, 0x50, 0x39, + 0x41, 0x55, 0xb8, 0x4d, 0xec, 0x87, 0x63, 0xb2, 0xf6, 0x18, 0xcf, 0xec, 0x56, 0x25, 0xca, 0x01, + 0x0e, 0x15, 0x37, 0x0e, 0x08, 0x89, 0x3b, 0x12, 0xaa, 0x10, 0xe2, 0xc4, 0x1f, 0xc1, 0x11, 0xc4, + 0x3f, 0x80, 0x22, 0x6e, 0x1c, 0xf9, 0x07, 0xd0, 0x8c, 0xd7, 0x3f, 0x36, 0xd9, 0x44, 0xbb, 0x6c, + 0xf6, 0x36, 0x3f, 0xfc, 0xde, 0xfb, 0xcc, 0xf7, 0xbd, 0x79, 0xb6, 0x81, 0x79, 0x91, 0xef, 0x48, + 0x4c, 0xbb, 0x98, 0x3a, 0x8f, 0x45, 0x7a, 0xf8, 0x69, 0x5b, 0x3c, 0x2e, 0x06, 0x76, 0x92, 0x0a, + 0x25, 0x68, 0x3d, 0x9f, 0x5b, 0x2f, 0x07, 0x22, 0x10, 0x66, 0xd1, 0xd1, 0xa3, 0x6c, 0xdf, 0x5a, + 0x08, 0x84, 0x08, 0xda, 0xe8, 0xf0, 0x24, 0x74, 0x78, 0x1c, 0x0b, 0xc5, 0x55, 0x28, 0x62, 0xd9, + 0xdb, 0x5d, 0x3f, 0xbc, 0x23, 0xed, 0x50, 0xe8, 0xdd, 0x88, 0x7b, 0x07, 0x61, 0x8c, 0xe9, 0x13, + 0x27, 0x39, 0x0c, 0xf4, 0x82, 0x74, 0x22, 0x54, 0xdc, 0xe9, 0x36, 0x9d, 0x00, 0x63, 0x4c, 0xb9, + 0x42, 0xbf, 0x67, 0xb5, 0x15, 0x84, 0xea, 0xa0, 0xb3, 0x6f, 0x7b, 0x22, 0x72, 0x78, 0x6a, 0x82, + 0x7e, 0x66, 0x06, 0xa5, 0x69, 0x81, 0xdb, 0x6d, 0xf2, 0x76, 0x72, 0xc0, 0xcf, 0x3a, 0x61, 0x65, + 0x68, 0xc7, 0x13, 0x29, 0x0e, 0x08, 0xc4, 0xfe, 0x21, 0xf0, 0xca, 0xa3, 0x9e, 0xa7, 0xad, 0x14, + 0xb9, 0x42, 0x17, 0x3f, 0xef, 0xa0, 0x54, 0x74, 0x01, 0xe6, 0x62, 
0x1e, 0xa1, 0x4c, 0xb8, 0x87, + 0x0d, 0xb2, 0x44, 0x6e, 0xce, 0xb9, 0xe5, 0x02, 0xdd, 0x83, 0x42, 0x96, 0xc6, 0xd4, 0x12, 0xb9, + 0x79, 0x75, 0xf5, 0x9e, 0x5d, 0x32, 0xdb, 0x39, 0xb3, 0x19, 0xd8, 0xc9, 0x61, 0x60, 0x6b, 0x66, + 0xbb, 0x50, 0x36, 0x67, 0xb6, 0xf3, 0xd8, 0x6e, 0xe1, 0x8e, 0xee, 0xc1, 0x35, 0xcf, 0x90, 0x7c, + 0x94, 0x18, 0x21, 0x1b, 0xd3, 0xc6, 0xff, 0x9a, 0x9d, 0x1d, 0xc7, 0xae, 0x2a, 0x59, 0xba, 0xd6, + 0x4a, 0xda, 0xdd, 0xa6, 0xbd, 0x55, 0x35, 0x75, 0xfb, 0x3d, 0xb1, 0x67, 0x04, 0x68, 0x1e, 0x71, + 0x1b, 0x55, 0x7e, 0x54, 0x06, 0xcf, 0xe5, 0xd1, 0x3f, 0xe4, 0x51, 0x7e, 0xda, 0xbe, 0xb5, 0x7e, + 0x39, 0xa6, 0x4e, 0xcb, 0xf1, 0x10, 0x20, 0x40, 0xd5, 0x0f, 0xbc, 0x32, 0x1c, 0xf0, 0x76, 0x61, + 0xe7, 0x56, 0x7c, 0xb0, 0xaf, 0x09, 0xbc, 0x94, 0xa3, 0xb6, 0x42, 0xa9, 0x86, 0x4b, 0xcb, 0x0e, + 0x5c, 0x6d, 0x87, 0xb2, 0x00, 0xc9, 0x32, 0xd3, 0x1c, 0x0e, 0xa4, 0x55, 0x1a, 0xba, 0x55, 0x2f, + 0xac, 0x53, 0x96, 0xc8, 0xc7, 0x89, 0x5f, 0x29, 0x91, 0xf1, 0x75, 0xb3, 0xa0, 0x1e, 0x61, 0x24, + 0xc2, 0x2f, 0xd0, 0x37, 0xaa, 0xd5, 0xdd, 0x62, 0xce, 0xfe, 0xa8, 0x24, 0xab, 0x25, 0x82, 0xcb, + 0x0b, 0xda, 0x80, 0xd9, 0x44, 0xf8, 0xc6, 0x78, 0xda, 0xec, 0xe5, 0x53, 0x6d, 0xe7, 0x89, 0x58, + 0x71, 0xad, 0x50, 0xe3, 0x4a, 0x66, 0x57, 0x2c, 0xd0, 0x4d, 0x80, 0xb6, 0x08, 0x72, 0x6d, 0x67, + 0x8c, 0xb6, 0xaf, 0x56, 0xb4, 0xb5, 0xf5, 0x25, 0xd3, 0x4a, 0x3e, 0x14, 0x7e, 0xab, 0x78, 0xd0, + 0xad, 0x18, 0xb1, 0x5f, 0x2b, 0xd7, 0xed, 0x01, 0xb6, 0xf1, 0x32, 0xb5, 0xdc, 0x83, 0x6b, 0xbe, + 0x71, 0xf9, 0xbf, 0xee, 0xcd, 0x83, 0xaa, 0xa9, 0xdb, 0xef, 0x89, 0xed, 0xc2, 0xfc, 0x69, 0x6a, + 0x99, 0x88, 0x58, 0xe2, 0x50, 0xd8, 0xf3, 0x50, 0x93, 0x8a, 0xab, 0x8e, 0xec, 0x31, 0xf7, 0x66, + 0x2c, 0x86, 0x7a, 0x4b, 0x04, 0xef, 0xc7, 0x2a, 0x7d, 0xa2, 0x73, 0xa2, 0x85, 0xc6, 0x58, 0xf5, + 0x5c, 0xe4, 0x53, 0xfa, 0x01, 0xcc, 0xa9, 0x30, 0xc2, 0x1d, 0xc5, 0xa3, 0xa4, 0x57, 0xd0, 0xcb, + 0xc3, 0x1d, 0x69, 0x37, 0x8c, 0xd0, 0x2d, 0x8d, 0x57, 0xff, 0x7d, 0x1e, 0x5e, 0xc8, 0x8f, 0xb1, + 0x83, 0x69, 0x37, 0xf4, 0x90, 0x3e, 0x25, 0x50, 0xcb, 0x5a, 0x06, 0xbd, 0x51, 0xb6, 0xa7, 0x81, + 0x1d, 0xd1, 0x1a, 0xaf, 0xc3, 0xb1, 0x85, 0xaf, 0xfe, 0xfc, 0xfb, 0xdb, 0xa9, 0x79, 0xf6, 0xa2, + 0x69, 0xc8, 0xdd, 0x66, 0xd1, 0xc1, 0xe5, 0x06, 0x59, 0xa6, 0xdf, 0x13, 0x98, 0xde, 0x46, 0x45, + 0x17, 0xce, 0x52, 0x94, 0x9d, 0x6a, 0x5c, 0x84, 0x75, 0x83, 0x60, 0xd3, 0x5b, 0x67, 0x10, 0x9c, + 0xa3, 0xa2, 0x90, 0x8e, 0x9d, 0xa3, 0x6a, 0xfa, 0x8e, 0xe9, 0x37, 0x04, 0xae, 0xe8, 0xe6, 0x40, + 0xaf, 0x9f, 0x65, 0xab, 0xb4, 0x26, 0x6b, 0x73, 0x2c, 0x38, 0xed, 0x89, 0xbd, 0x66, 0x00, 0x6f, + 0xd0, 0xeb, 0x17, 0x02, 0xd2, 0x2f, 0x09, 0xd4, 0xb2, 0x42, 0x1c, 0x94, 0xb5, 0xbe, 0x8b, 0x65, + 0x2d, 0x9d, 0xff, 0x40, 0x56, 0xc3, 0xb9, 0x2a, 0xcb, 0xa3, 0xa9, 0xf2, 0x23, 0x81, 0x19, 0x17, + 0x75, 0xed, 0x0e, 0x40, 0xe8, 0xeb, 0x93, 0xe3, 0x66, 0xed, 0x9e, 0xe1, 0x7b, 0xcb, 0x5a, 0x1d, + 0x85, 0xcf, 0x49, 0x35, 0x9b, 0xae, 0xac, 0x9f, 0x09, 0xd4, 0x5d, 0x94, 0x9d, 0xfd, 0x28, 0x54, + 0x13, 0x67, 0x7d, 0xd7, 0xb0, 0x6e, 0x58, 0xb7, 0x47, 0x64, 0xcd, 0xf0, 0x34, 0xee, 0x33, 0x02, + 0x35, 0x8d, 0x1b, 0xe1, 0xc4, 0x61, 0xdf, 0x31, 0xb0, 0x77, 0xac, 0xb5, 0x91, 0x61, 0x23, 0xd4, + 0xa8, 0x3f, 0x11, 0x98, 0xdd, 0xe9, 0xc8, 0x04, 0x63, 0x7f, 0xe2, 0xac, 0xf7, 0x0d, 0xeb, 0xdb, + 0xd6, 0xfa, 0x48, 0xac, 0x32, 0xa3, 0xd3, 0xb0, 0xbf, 0x10, 0x98, 0xdb, 0xc5, 0x34, 0x0a, 0xe3, + 0x73, 0x9a, 0xdd, 0xa5, 0xe2, 0x6e, 0x1a, 0xdc, 0xbb, 0xd6, 0x9b, 0x23, 0xe1, 0xaa, 0x9c, 0x4f, + 0x03, 0x7f, 0x67, 0xda, 0x4e, 0xac, 0x26, 0xde, 0x98, 0xdf, 0x30, 0xac, 0xaf, 0x33, 0x76, 
0x31, + 0x6b, 0x3b, 0x8c, 0x4d, 0x81, 0x3e, 0x25, 0x30, 0x9b, 0xbd, 0xe1, 0xe5, 0xa0, 0x6e, 0x5d, 0x7e, + 0xaa, 0x58, 0xb4, 0xdc, 0xcd, 0x5f, 0x74, 0x6c, 0xdb, 0x04, 0xdb, 0xa4, 0xf7, 0x4f, 0x07, 0xbb, + 0x48, 0x97, 0x44, 0xf8, 0xd2, 0x39, 0xea, 0x7d, 0xa5, 0x1c, 0x3b, 0x6d, 0x11, 0xc8, 0x15, 0x42, + 0x7f, 0x20, 0x30, 0xf3, 0x88, 0x2b, 0xef, 0x60, 0xb2, 0x2f, 0x8d, 0xbb, 0x86, 0xf8, 0x36, 0x2d, + 0x6e, 0x89, 0x54, 0x29, 0xf2, 0x68, 0xa8, 0x8c, 0xae, 0x90, 0xf7, 0x36, 0x7e, 0x3b, 0x59, 0x24, + 0xbf, 0x9f, 0x2c, 0x92, 0xbf, 0x4e, 0x16, 0xc9, 0x27, 0xb7, 0xce, 0xfd, 0xb1, 0x19, 0xf0, 0x27, + 0xb6, 0x5f, 0x33, 0x3f, 0x29, 0x6b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x35, 0xfa, 0xad, 0xd6, + 0xa7, 0x0d, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1273,106 +1143,6 @@ var _WorkflowService_serviceDesc = grpc.ServiceDesc{ Metadata: "cmd/server/workflow/workflow.proto", } -func (m *SubmitOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SubmitOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SubmitOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.OwnerReference != nil { - { - size, err := m.OwnerReference.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintWorkflow(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if len(m.Labels) > 0 { - i -= len(m.Labels) - copy(dAtA[i:], m.Labels) - i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Labels))) - i-- - dAtA[i] = 0x42 - } - if m.ServerDryRun { - i-- - if m.ServerDryRun { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if len(m.ServiceAccount) > 0 { - i -= len(m.ServiceAccount) - copy(dAtA[i:], m.ServiceAccount) - i = encodeVarintWorkflow(dAtA, i, uint64(len(m.ServiceAccount))) - i-- - dAtA[i] = 0x32 - } - if len(m.Parameters) > 0 { - for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Parameters[iNdEx]) - copy(dAtA[i:], m.Parameters[iNdEx]) - i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Parameters[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Entrypoint) > 0 { - i -= len(m.Entrypoint) - copy(dAtA[i:], m.Entrypoint) - i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Entrypoint))) - i-- - dAtA[i] = 0x22 - } - if len(m.InstanceID) > 0 { - i -= len(m.InstanceID) - copy(dAtA[i:], m.InstanceID) - i = encodeVarintWorkflow(dAtA, i, uint64(len(m.InstanceID))) - i-- - dAtA[i] = 0x1a - } - if len(m.GenerateName) > 0 { - i -= len(m.GenerateName) - copy(dAtA[i:], m.GenerateName) - i = encodeVarintWorkflow(dAtA, i, uint64(len(m.GenerateName))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintWorkflow(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *WorkflowCreateRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1397,18 +1167,6 @@ func (m *WorkflowCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.SubmitOptions != nil { - { - size, err := m.SubmitOptions.MarshalToSizedBuffer(dAtA[:i]) - 
if err != nil { - return 0, err - } - i -= size - i = encodeVarintWorkflow(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } if m.CreateOptions != nil { { size, err := m.CreateOptions.MarshalToSizedBuffer(dAtA[:i]) @@ -1811,55 +1569,6 @@ func encodeVarintWorkflow(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *SubmitOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovWorkflow(uint64(l)) - } - l = len(m.GenerateName) - if l > 0 { - n += 1 + l + sovWorkflow(uint64(l)) - } - l = len(m.InstanceID) - if l > 0 { - n += 1 + l + sovWorkflow(uint64(l)) - } - l = len(m.Entrypoint) - if l > 0 { - n += 1 + l + sovWorkflow(uint64(l)) - } - if len(m.Parameters) > 0 { - for _, s := range m.Parameters { - l = len(s) - n += 1 + l + sovWorkflow(uint64(l)) - } - } - l = len(m.ServiceAccount) - if l > 0 { - n += 1 + l + sovWorkflow(uint64(l)) - } - if m.ServerDryRun { - n += 2 - } - l = len(m.Labels) - if l > 0 { - n += 1 + l + sovWorkflow(uint64(l)) - } - if m.OwnerReference != nil { - l = m.OwnerReference.Size() - n += 1 + l + sovWorkflow(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - func (m *WorkflowCreateRequest) Size() (n int) { if m == nil { return 0 @@ -1878,10 +1587,6 @@ func (m *WorkflowCreateRequest) Size() (n int) { l = m.CreateOptions.Size() n += 1 + l + sovWorkflow(uint64(l)) } - if m.SubmitOptions != nil { - l = m.SubmitOptions.Size() - n += 1 + l + sovWorkflow(uint64(l)) - } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -2057,7 +1762,7 @@ func sovWorkflow(x uint64) (n int) { func sozWorkflow(x uint64) (n int) { return sovWorkflow(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *SubmitOptions) Unmarshal(dAtA []byte) error { +func (m *WorkflowCreateRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2080,15 +1785,15 @@ func (m *SubmitOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SubmitOptions: wiretype end group for non-group") + return fmt.Errorf("proto: WorkflowCreateRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SubmitOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkflowCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2116,13 +1821,13 @@ func (m *SubmitOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowWorkflow @@ -2132,61 +1837,33 @@ func (m *SubmitOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if 
msglen < 0 { return ErrInvalidLengthWorkflow } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthWorkflow } if postIndex > l { return io.ErrUnexpectedEOF } - m.GenerateName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InstanceID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorkflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthWorkflow - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthWorkflow + if m.Workflow == nil { + m.Workflow = &v1alpha1.Workflow{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Workflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.InstanceID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Entrypoint", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CreateOptions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowWorkflow @@ -2196,321 +1873,15 @@ func (m *SubmitOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthWorkflow } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthWorkflow - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Entrypoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorkflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthWorkflow - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthWorkflow - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parameters = append(m.Parameters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorkflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthWorkflow - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthWorkflow - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceAccount = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerDryRun", wireType) - } - var v int - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorkflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ServerDryRun = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorkflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthWorkflow - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthWorkflow - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Labels = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OwnerReference", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorkflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWorkflow - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthWorkflow - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.OwnerReference == nil { - m.OwnerReference = &v1.OwnerReference{} - } - if err := m.OwnerReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipWorkflow(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthWorkflow - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthWorkflow - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
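
The repeated "for shift := uint(0); ; shift += 7" loops throughout this generated code are the gogo/protobuf code generator's inlined base-128 varint decoding: each wire byte contributes its low seven bits, least-significant group first, and a cleared high bit marks the final byte. A minimal self-contained sketch of the same decoding (decodeVarint is illustrative, not part of the generated API):

package main

import (
	"fmt"
	"io"
)

// decodeVarint mirrors the loops inlined by the generated Unmarshal code.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			// the generated code reports this case as ErrIntOverflowWorkflow
			return 0, 0, fmt.Errorf("varint overflows uint64")
		}
		if n >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			return v, n, nil
		}
	}
}

func main() {
	// 300 on the wire is 0xAC 0x02: (0xAC & 0x7F) | (0x02 << 7) = 44 + 256.
	v, n, _ := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n) // 300 2
	// The first varint of each field is its key, which the generated code
	// then splits as fieldNum = key >> 3 and wireType = key & 0x7.
}
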
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowCreateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorkflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowCreateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorkflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthWorkflow - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthWorkflow - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorkflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWorkflow - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthWorkflow - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Workflow == nil { - m.Workflow = &v1alpha1.Workflow{} - } - if err := m.Workflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorkflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWorkflow - } - postIndex := iNdEx + msglen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthWorkflow } @@ -2524,42 +1895,6 @@ func (m *WorkflowCreateRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubmitOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorkflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWorkflow - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthWorkflow - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SubmitOptions == nil { - m.SubmitOptions = &SubmitOptions{} - } - if err := 
m.SubmitOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipWorkflow(dAtA[iNdEx:]) diff --git a/cmd/server/workflow/workflow.proto b/cmd/server/workflow/workflow.proto index 54420e1e4ae7..abb626972fe0 100644 --- a/cmd/server/workflow/workflow.proto +++ b/cmd/server/workflow/workflow.proto @@ -13,23 +13,10 @@ import "k8s.io/api/core/v1/generated.proto"; // Workflow Service API performs CRUD actions against application resources package workflow; -message SubmitOptions { - string name = 1; - string generateName = 2; - string instanceID = 3; - string entrypoint = 4; - repeated string parameters = 5; - string serviceAccount = 6; - bool serverDryRun = 7; - string labels = 8; - k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference ownerReference = 9; -} - message WorkflowCreateRequest { string namespace = 1; github.com.argoproj.argo.pkg.apis.workflow.v1alpha1.Workflow workflow = 2; k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions createOptions = 3; - SubmitOptions submitOptions = 4; } message WorkflowGetRequest { diff --git a/cmd/server/workflow/workflow.swagger.json b/cmd/server/workflow/workflow.swagger.json index db21ec3f0627..fcad214809d7 100644 --- a/cmd/server/workflow/workflow.swagger.json +++ b/cmd/server/workflow/workflow.swagger.json @@ -4370,42 +4370,6 @@ } } }, - "workflowSubmitOptions": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "generateName": { - "type": "string" - }, - "instanceID": { - "type": "string" - }, - "entrypoint": { - "type": "string" - }, - "parameters": { - "type": "array", - "items": { - "type": "string" - } - }, - "serviceAccount": { - "type": "string" - }, - "serverDryRun": { - "type": "boolean", - "format": "boolean" - }, - "labels": { - "type": "string" - }, - "ownerReference": { - "$ref": "#/definitions/v1OwnerReference" - } - } - }, "workflowWorkflowCreateRequest": { "type": "object", "properties": { @@ -4417,9 +4381,6 @@ }, "createOptions": { "$ref": "#/definitions/v1CreateOptions" - }, - "submitOptions": { - "$ref": "#/definitions/workflowSubmitOptions" } } }, diff --git a/cmd/server/workflow/workflow_server.go b/cmd/server/workflow/workflow_server.go index 8be32a2a1381..60f44d48e879 100644 --- a/cmd/server/workflow/workflow_server.go +++ b/cmd/server/workflow/workflow_server.go @@ -21,32 +21,36 @@ import ( "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo/pkg/client/clientset/versioned" wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" - cmdutil "github.com/argoproj/argo/util/cmd" - "github.com/argoproj/argo/workflow/common" "github.com/argoproj/argo/workflow/config" "github.com/argoproj/argo/workflow/util" "github.com/argoproj/argo/workflow/validate" ) type WorkflowServer struct { - Namespace string - WfClientset *versioned.Clientset - KubeClientset *kubernetes.Clientset - EnableClientAuth bool - Config *config.WorkflowControllerConfig - WfDBService *DBService - WfKubeService *KubeService + namespace string + wfClientset *versioned.Clientset + kubeClientset *kubernetes.Clientset + enableClientAuth bool + config *config.WorkflowControllerConfig + wfDBService *DBService + wfKubeService *KubeService } func NewWorkflowServer(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset, config *config.WorkflowControllerConfig, enableClientAuth bool) *WorkflowServer { - wfServer := WorkflowServer{Namespace: namespace, WfClientset: wfClientset, KubeClientset: 
kubeClientSet, EnableClientAuth: enableClientAuth} + wfServer := WorkflowServer{ + namespace: namespace, + wfClientset: wfClientset, + kubeClientset: kubeClientSet, + enableClientAuth: enableClientAuth, + wfKubeService: NewKubeServer(namespace, wfClientset, kubeClientSet, enableClientAuth), + } if config != nil && config.Persistence != nil { var err error - wfServer.WfDBService, err = NewDBService(kubeClientSet, namespace, config.Persistence) + wfServer.wfDBService, err = NewDBService(kubeClientSet, namespace, config.Persistence) if err != nil { - wfServer.WfDBService = nil + wfServer.wfDBService = nil log.Errorf("Error Creating DB Context. %v", err) - }else { + } else { log.Infof("DB Context created successfully") } } @@ -71,8 +75,8 @@ func (s *WorkflowServer) CreatePersistenceContext(namespace string, kubeClientSe func (s *WorkflowServer) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubernetes.Clientset, error) { md, _ := metadata.FromIncomingContext(ctx) - if !s.EnableClientAuth { - return s.WfClientset, s.KubeClientset, nil + if !s.enableClientAuth { + return s.wfClientset, s.kubeClientset, nil } var restConfigStr, bearerToken string @@ -96,13 +100,13 @@ func (s *WorkflowServer) GetWFClient(ctx context.Context) (*versioned.Clientset, wfClientset, err := wfclientset.NewForConfig(&restConfig) if err != nil { - log.Errorf("Failure to create WfClientset with ClientConfig '%+v': %s", restConfig, err) + log.Errorf("Failure to create wfClientset with ClientConfig '%+v': %s", restConfig, err) return nil, nil, err } clientset, err := kubernetes.NewForConfig(&restConfig) if err != nil { - log.Errorf("Failure to create KubeClientset with ClientConfig '%+v': %s", restConfig, err) + log.Errorf("Failure to create kubeClientset with ClientConfig '%+v': %s", restConfig, err) return nil, nil, err } @@ -115,7 +119,7 @@ func (s *WorkflowServer) Create(ctx context.Context, wfReq *WorkflowCreateReques if err != nil { return nil, err } - namespace := s.Namespace + namespace := s.namespace if wfReq.Namespace != "" { namespace = wfReq.Namespace } @@ -125,23 +129,15 @@ func (s *WorkflowServer) Create(ctx context.Context, wfReq *WorkflowCreateReques wfReq.Workflow.Namespace = namespace - wf, err := s.ApplyWorkflowOptions(wfReq.Workflow, wfReq.SubmitOptions) - if err != nil { - return nil, err - } - wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(namespace)) - err = validate.ValidateWorkflow(wftmplGetter, wf, validate.ValidateOpts{}) + err = validate.ValidateWorkflow(wftmplGetter, wfReq.Workflow, validate.ValidateOpts{}) if err != nil { return nil, err } - if wfReq.SubmitOptions != nil && wfReq.SubmitOptions.ServerDryRun { - return util.CreateServerDryRun(wf, wfClient) - } - - wf, err = s.WfKubeService.Create(wfClient, namespace, wfReq.Workflow) + // TODO server dry-run + wf, err := s.wfKubeService.Create(wfClient, wfReq) if err != nil { log.Errorf("Create request is failed. 
Error: %s", err) @@ -158,13 +154,13 @@ func (s *WorkflowServer) Get(ctx context.Context, wfReq *WorkflowGetRequest) (*v } var wf *v1alpha1.Workflow - namespace := s.Namespace + namespace := s.namespace if wfReq.Namespace != "" { namespace = wfReq.Namespace } - if s.WfDBService != nil { - wf, err = s.WfDBService.Get(wfReq.WorkflowName, wfReq.Namespace) + if s.wfDBService != nil { + wf, err = s.wfDBService.Get(wfReq.WorkflowName, wfReq.Namespace) } else { wf, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) @@ -184,7 +180,7 @@ func (s *WorkflowServer) List(ctx context.Context, wfReq *WorkflowListRequest) ( var wfList *v1alpha1.WorkflowList var listOption v1.ListOptions = v1.ListOptions{} - namespace := s.Namespace + namespace := s.namespace if wfReq.Namespace != "" { namespace = wfReq.Namespace } @@ -193,13 +189,13 @@ func (s *WorkflowServer) List(ctx context.Context, wfReq *WorkflowListRequest) ( listOption = *wfReq.ListOptions } - if s.WfDBService != nil { + if s.wfDBService != nil { var pagesize uint = 0 if wfReq.ListOptions != nil { pagesize = uint(wfReq.ListOptions.Limit) } - wfList, err = s.WfDBService.List(namespace, pagesize, "") + wfList, err = s.wfDBService.List(namespace, pagesize, "") } else { wfList, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).List(listOption) } @@ -216,13 +212,13 @@ func (s *WorkflowServer) Delete(ctx context.Context, wfReq *WorkflowDeleteReques return nil, err } - namespace := s.Namespace + namespace := s.namespace if wfReq.Namespace != "" { namespace = wfReq.Namespace } - if s.WfDBService != nil { - err = s.WfDBService.Delete(wfReq.WorkflowName, wfReq.Namespace) + if s.wfDBService != nil { + err = s.wfDBService.Delete(wfReq.WorkflowName, wfReq.Namespace) if err != nil { return nil, err } @@ -240,7 +236,7 @@ func (s *WorkflowServer) Delete(ctx context.Context, wfReq *WorkflowDeleteReques } func (s *WorkflowServer) Retry(ctx context.Context, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - namespace := s.Namespace + namespace := s.namespace if wfReq.Namespace != "" { namespace = wfReq.Namespace } @@ -270,7 +266,7 @@ func (s *WorkflowServer) Resubmit(ctx context.Context, in *WorkflowUpdateRequest return nil, err } - namespace := s.Namespace + namespace := s.namespace if in.Namespace != "" { namespace = in.Namespace } @@ -297,7 +293,7 @@ func (s *WorkflowServer) Resume(ctx context.Context, wfReq *WorkflowUpdateReques return nil, err } - namespace := s.Namespace + namespace := s.namespace if wfReq.Namespace != "" { namespace = wfReq.Namespace } @@ -322,7 +318,7 @@ func (s *WorkflowServer) Suspend(ctx context.Context, wfReq *WorkflowUpdateReque return nil, err } - namespace := s.Namespace + namespace := s.namespace if wfReq.Namespace != "" { namespace = wfReq.Namespace } @@ -346,7 +342,7 @@ func (s *WorkflowServer) Terminate(ctx context.Context, wfReq *WorkflowUpdateReq return nil, err } - namespace := s.Namespace + namespace := s.namespace if wfReq.Namespace != "" { namespace = wfReq.Namespace } @@ -370,7 +366,7 @@ func (s *WorkflowServer) Lint(ctx context.Context, wfReq *WorkflowCreateRequest) return nil, err } - namespace := s.Namespace + namespace := s.namespace if wfReq.Namespace != "" { namespace = wfReq.Namespace } @@ -390,7 +386,7 @@ func (s *WorkflowServer) Watch(wfReq *WorkflowGetRequest, ws WorkflowService_Wat return err } - namespace := s.Namespace + namespace := s.namespace if wfReq.Namespace != "" { namespace = wfReq.Namespace } @@ -430,7 +426,7 @@ func (s *WorkflowServer) 
PodLogs(wfReq *WorkflowLogRequest, log WorkflowService_ return err } - namespace := s.Namespace + namespace := s.namespace if wfReq.Namespace != "" { namespace = wfReq.Namespace } @@ -473,67 +469,3 @@ func (s *WorkflowServer) PodLogs(wfReq *WorkflowLogRequest, log WorkflowService_ } return err } - -func (s *WorkflowServer) ApplyWorkflowOptions(wf *v1alpha1.Workflow, opts *SubmitOptions) (*v1alpha1.Workflow, error) { - if opts == nil { - return wf, nil - } - if opts.Entrypoint != "" { - wf.Spec.Entrypoint = opts.Entrypoint - } - if opts.ServiceAccount != "" { - wf.Spec.ServiceAccountName = opts.ServiceAccount - } - labels := wf.GetLabels() - if labels == nil { - labels = make(map[string]string) - } - if opts.Labels != "" { - passedLabels, err := cmdutil.ParseLabels(opts.Labels) - if err != nil { - return nil, fmt.Errorf("Expected labels of the form: NAME1=VALUE2,NAME2=VALUE2. Received: %s", opts.Labels) - } - for k, v := range passedLabels { - labels[k] = v - } - } - if opts.InstanceID != "" { - labels[common.LabelKeyControllerInstanceID] = opts.InstanceID - } - wf.SetLabels(labels) - if len(opts.Parameters) > 0 { - newParams := make([]v1alpha1.Parameter, 0) - passedParams := make(map[string]bool) - for _, paramStr := range opts.Parameters { - parts := strings.SplitN(paramStr, "=", 2) - if len(parts) == 1 { - return nil, fmt.Errorf("Expected parameter of the form: NAME=VALUE. Received: %s", paramStr) - } - param := v1alpha1.Parameter{ - Name: parts[0], - Value: &parts[1], - } - newParams = append(newParams, param) - passedParams[param.Name] = true - } - - for _, param := range wf.Spec.Arguments.Parameters { - if _, ok := passedParams[param.Name]; ok { - // this parameter was overridden via command line - continue - } - newParams = append(newParams, param) - } - wf.Spec.Arguments.Parameters = newParams - } - if opts.GenerateName != "" { - wf.ObjectMeta.GenerateName = opts.GenerateName - } - if opts.Name != "" { - wf.ObjectMeta.Name = opts.Name - } - if opts.OwnerReference != nil { - wf.SetOwnerReferences(append(wf.GetOwnerReferences(), *opts.OwnerReference)) - } - return wf, nil -} diff --git a/cmd/server/workflow/workflow_service.go b/cmd/server/workflow/workflow_service.go index 1e051ceecb97..1a07aeae20d3 100644 --- a/cmd/server/workflow/workflow_service.go +++ b/cmd/server/workflow/workflow_service.go @@ -15,30 +15,29 @@ import ( "github.com/argoproj/argo/pkg/client/clientset/versioned" wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" "github.com/argoproj/argo/workflow/util" - ) type KubeService struct { - Namespace string - WfClientset *versioned.Clientset - KubeClientset *kubernetes.Clientset - EnableClientAuth bool + namespace string + wfClientset *versioned.Clientset + kubeClientset *kubernetes.Clientset + enableClientAuth bool } func NewKubeServer(Namespace string, wfClientset *wfclientset.Clientset, kubeClientSet *kubernetes.Clientset, enableClientAuth bool) *KubeService { return &KubeService{ - Namespace: Namespace, - WfClientset: wfClientset, - KubeClientset: kubeClientSet, - EnableClientAuth: enableClientAuth, + namespace: Namespace, + wfClientset: wfClientset, + kubeClientset: kubeClientSet, + enableClientAuth: enableClientAuth, } } func (s *KubeService) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubernetes.Clientset, error) { md, _ := metadata.FromIncomingContext(ctx) - if s.EnableClientAuth { - return s.WfClientset, s.KubeClientset, nil + if s.enableClientAuth { + return s.wfClientset, s.kubeClientset, nil } var restConfigStr, bearerToken 
string @@ -62,21 +61,21 @@ func (s *KubeService) GetWFClient(ctx context.Context) (*versioned.Clientset, *k wfClientset, err := wfclientset.NewForConfig(&restConfig) if err != nil { - log.Errorf("Failure to create WfClientset with ClientConfig '%+v': %s", restConfig, err) + log.Errorf("Failure to create wfClientset with ClientConfig '%+v': %s", restConfig, err) return nil, nil, err } clientset, err := kubernetes.NewForConfig(&restConfig) if err != nil { - log.Errorf("Failure to create KubeClientset with ClientConfig '%+v': %s", restConfig, err) + log.Errorf("Failure to create kubeClientset with ClientConfig '%+v': %s", restConfig, err) return nil, nil, err } return wfClientset, clientset, nil } -func (s *KubeService) Create(wfClient *versioned.Clientset, namespace string, wf *v1alpha1.Workflow) (*v1alpha1.Workflow, error) { - createdWf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Create(wf) +func (s *KubeService) Create(wfClient *versioned.Clientset, wfReq *WorkflowCreateRequest) (*v1alpha1.Workflow, error) { + createdWf, err := wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace).Create(wfReq.Workflow) if err != nil { log.Warnf("Create request is failed. Error: %s", err) return nil, err @@ -86,8 +85,8 @@ func (s *KubeService) Create(wfClient *versioned.Clientset, namespace string, wf return createdWf, nil } -func (s *KubeService) Get(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowGetRequest) (*v1alpha1.Workflow, error) { - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) +func (s *KubeService) Get(wfClient *versioned.Clientset, wfReq *WorkflowGetRequest) (*v1alpha1.Workflow, error) { + wf, err := wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } diff --git a/cmd/server/workflowtemplate/workflow_template_server.go b/cmd/server/workflowtemplate/workflow_template_server.go index da19165a5f97..81d79c317455 100644 --- a/cmd/server/workflowtemplate/workflow_template_server.go +++ b/cmd/server/workflowtemplate/workflow_template_server.go @@ -1,19 +1,18 @@ package workflowtemplate import ( - context "context" + "context" "encoding/json" "errors" "fmt" - log "github.com/sirupsen/logrus" "google.golang.org/grpc/metadata" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" - common "github.com/argoproj/argo/cmd/server/common" + "github.com/argoproj/argo/cmd/server/common" "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" "github.com/argoproj/argo/pkg/client/clientset/versioned" wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" @@ -23,24 +22,22 @@ import ( ) type WorkflowTemplateServer struct { - Namespace string - WfClientset *versioned.Clientset - KubeClientset *kubernetes.Clientset - EnableClientAuth bool - Config *config.WorkflowControllerConfig + namespace string + wfClientset *versioned.Clientset + kubeClientset *kubernetes.Clientset + enableClientAuth bool + config *config.WorkflowControllerConfig } func NewWorkflowTemplateServer(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset, config *config.WorkflowControllerConfig, enableClientAuth bool) *WorkflowTemplateServer { - wfTmplServer := WorkflowTemplateServer{Namespace: namespace, WfClientset: wfClientset, KubeClientset: kubeClientSet, EnableClientAuth: enableClientAuth} - - return &wfTmplServer + return &WorkflowTemplateServer{namespace: namespace, wfClientset: wfClientset, 
kubeClientset: kubeClientSet, enableClientAuth: enableClientAuth} } func (s *WorkflowTemplateServer) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubernetes.Clientset, error) { md, _ := metadata.FromIncomingContext(ctx) - if !s.EnableClientAuth { - return s.WfClientset, s.KubeClientset, nil + if !s.enableClientAuth { + return s.wfClientset, s.kubeClientset, nil } var restConfigStr, bearerToken string @@ -64,13 +61,13 @@ func (s *WorkflowTemplateServer) GetWFClient(ctx context.Context) (*versioned.Cl wfClientset, err := wfclientset.NewForConfig(&restConfig) if err != nil { - log.Errorf("Failure to create WfClientset with ClientConfig '%+v': %s", restConfig, err) + log.Errorf("Failure to create wfClientset with ClientConfig '%+v': %s", restConfig, err) return nil, nil, err } clientset, err := kubernetes.NewForConfig(&restConfig) if err != nil { - log.Errorf("Failure to create KubeClientset with ClientConfig '%+v': %s", restConfig, err) + log.Errorf("Failure to create kubeClientset with ClientConfig '%+v': %s", restConfig, err) return nil, nil, err } @@ -82,7 +79,7 @@ func (wts *WorkflowTemplateServer) Create(ctx context.Context, wftmplReq *Workfl if err != nil { return nil, err } - namespace := wts.Namespace + namespace := wts.namespace if wftmplReq.Namespace != "" { namespace = wftmplReq.Namespace } @@ -91,7 +88,6 @@ func (wts *WorkflowTemplateServer) Create(ctx context.Context, wftmplReq *Workfl } wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(namespace)) - err = validate.ValidateWorkflowTemplate(wftmplGetter, wftmplReq.Template) if err != nil { return nil, fmt.Errorf("Failed to create workflow template: %v", err) @@ -112,7 +108,7 @@ func (wts *WorkflowTemplateServer) Get(ctx context.Context, wftmplReq *WorkflowT return nil, err } - namespace := wts.Namespace + namespace := wts.namespace if wftmplReq.Namespace != "" { namespace = wftmplReq.Namespace } @@ -132,7 +128,7 @@ func (wts *WorkflowTemplateServer) List(ctx context.Context, wftmplReq *Workflow return nil, err } - namespace := wts.Namespace + namespace := wts.namespace if wftmplReq.Namespace != "" { namespace = wftmplReq.Namespace } @@ -152,7 +148,7 @@ func (wts *WorkflowTemplateServer) Delete(ctx context.Context, wftmplReq *Workfl return nil, err } - namespace := wts.Namespace + namespace := wts.namespace if wftmplReq.Namespace != "" { namespace = wftmplReq.Namespace } @@ -174,7 +170,7 @@ func (wts *WorkflowTemplateServer) Lint(ctx context.Context, wftmplReq *Workflow return nil, err } - namespace := wts.Namespace + namespace := wts.namespace if wftmplReq.Namespace != "" { namespace = wftmplReq.Namespace } diff --git a/pkg/apiclient/apiclient.go b/pkg/apiclient/apiclient.go index 2eafa609d39e..6f068077b98e 100644 --- a/pkg/apiclient/apiclient.go +++ b/pkg/apiclient/apiclient.go @@ -1,10 +1,6 @@ package apiclient const ( - - // EnvArgoCDServer is the environment variable to look for an Argo CD server address - EnvArgoServer = "ARGO_SERVER" - // MaxGRPCMessageSize contains max grpc message size MaxGRPCMessageSize = 100 * 1024 * 1024 ) diff --git a/test/e2e/argo_server_test.go b/test/e2e/argo_server_test.go new file mode 100644 index 000000000000..2d545f4e52fc --- /dev/null +++ b/test/e2e/argo_server_test.go @@ -0,0 +1,94 @@ +package e2e + +import ( + "bytes" + "encoding/json" + wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" + "github.com/argoproj/argo/test/e2e/fixtures" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + 
"io/ioutil" + "net/http" + "testing" +) + +const baseUrl = "http://localhost:2746/api/v1" + +type ArgoServerSuite struct { + fixtures.E2ESuite +} + +// ensure basic HTTP functionality works, +// testing behaviour really is a non-goal +func (s *ArgoServerSuite) TestArgoServer() { + t := s.T() + t.Run("CreateWorkflow", func(t *testing.T) { + resp, err := http.Post(baseUrl+"/workflows", "json", bytes.NewBuffer([]byte(`{ + "namespace": "argo", + "workflow": { + "metadata": { + "name": "test", + "labels": { + "argo-e2e": "true" + } + }, + "spec": { + "templates": [ + { + "name": "run-workflow", + "container": { + "name": "", + "image": "docker/whalesay:latest" + } + } + ], + "entrypoint": "run-workflow" + } + } +}`))) + if assert.NoError(t, err) { + // GRPC is non-standard for return codes, 200 rather than 201 + assert.Equal(t, "200 OK", resp.Status) + body, err := ioutil.ReadAll(resp.Body) + assert.NoError(t, err) + // make sure we can un-marshall the response + err = json.Unmarshal(body, &wfv1.Workflow{}) + assert.NoError(t, err) + } + + }) + t.Run("ListWorkflows", func(t *testing.T) { + resp, err := http.Get(baseUrl + "/workflows/argo") + if assert.NoError(t, err) { + assert.Equal(t, "200 OK", resp.Status) + body, err := ioutil.ReadAll(resp.Body) + assert.NoError(t, err) + workflows := &wfv1.WorkflowList{} + err = json.Unmarshal(body, workflows) + assert.NoError(t, err) + assert.Len(t, workflows.Items, 1) + } + }) + t.Run("GetWorkflow", func(t *testing.T) { + resp, err := http.Get(baseUrl + "/workflows/argo/test") + if assert.NoError(t, err) { + assert.Equal(t, "200 OK", resp.Status) + body, err := ioutil.ReadAll(resp.Body) + assert.NoError(t, err) + err = json.Unmarshal(body, &wfv1.Workflow{}) + assert.NoError(t, err) + } + }) + t.Run("DeleteWorkflow", func(t *testing.T) { + req, err := http.NewRequest("DELETE", baseUrl+"/workflows/argo/test", nil) + assert.NoError(t, err) + resp, err := http.DefaultClient.Do(req) + if assert.NoError(t, err) { + assert.Equal(t, "200 OK", resp.Status) + } + }) +} + +func TestArgoServerSuite(t *testing.T) { + suite.Run(t, new(ArgoServerSuite)) +} diff --git a/util/grpc/interceptor.go b/util/grpc/interceptor.go new file mode 100644 index 000000000000..35791f4886bb --- /dev/null +++ b/util/grpc/interceptor.go @@ -0,0 +1,36 @@ +package grpc + +import ( + "github.com/sirupsen/logrus" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "runtime/debug" +) + +// PanicLoggerUnaryServerInterceptor returns a new unary server interceptor for recovering from panics and returning error +func PanicLoggerUnaryServerInterceptor(log *logrus.Entry) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (_ interface{}, err error) { + defer func() { + if r := recover(); r != nil { + log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack()) + err = status.Errorf(codes.Internal, "%s", r) + } + }() + return handler(ctx, req) + } +} + +// PanicLoggerStreamServerInterceptor returns a new streaming server interceptor for recovering from panics and returning error +func PanicLoggerStreamServerInterceptor(log *logrus.Entry) grpc.StreamServerInterceptor { + return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) (err error) { + defer func() { + if r := recover(); r != nil { + log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack()) + err = status.Errorf(codes.Internal, 
"%s", r) + } + }() + return handler(srv, stream) + } +} From 7dacff6d5ad299fe1ddeb1689f0f88895009464f Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Mon, 16 Dec 2019 21:23:09 -0800 Subject: [PATCH 019/421] lint --- Makefile | 4 ---- cmd/server/apiserver/argoserver.go | 1 - cmd/server/workflow/workflow_server.go | 1 - cmd/server/workflowtemplate/workflow_template_server.go | 1 - 4 files changed, 7 deletions(-) diff --git a/Makefile b/Makefile index 4b904b68b910..ba370d3b26cc 100644 --- a/Makefile +++ b/Makefile @@ -137,10 +137,6 @@ argo-server-darwin: argo-server-windows: CGO_ENABLED=0 GOARCH=amd64 GOOS=windows go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argo-server-windows-amd64 ./cmd/server - - - - .PHONY: executor executor: go build -v -i -ldflags '${LDFLAGS}' -o ${DIST_DIR}/argoexec ./cmd/argoexec diff --git a/cmd/server/apiserver/argoserver.go b/cmd/server/apiserver/argoserver.go index 8d84edfa6b7b..159eedef916a 100644 --- a/cmd/server/apiserver/argoserver.go +++ b/cmd/server/apiserver/argoserver.go @@ -38,7 +38,6 @@ type ArgoServer struct { wfClientSet *versioned.Clientset enableClientAuth bool insecure bool - config *config.WorkflowControllerConfig configName string stopCh chan struct{} } diff --git a/cmd/server/workflow/workflow_server.go b/cmd/server/workflow/workflow_server.go index 60f44d48e879..28d190bdd525 100644 --- a/cmd/server/workflow/workflow_server.go +++ b/cmd/server/workflow/workflow_server.go @@ -31,7 +31,6 @@ type WorkflowServer struct { wfClientset *versioned.Clientset kubeClientset *kubernetes.Clientset enableClientAuth bool - config *config.WorkflowControllerConfig wfDBService *DBService wfKubeService *KubeService } diff --git a/cmd/server/workflowtemplate/workflow_template_server.go b/cmd/server/workflowtemplate/workflow_template_server.go index 81d79c317455..3c170cfa874f 100644 --- a/cmd/server/workflowtemplate/workflow_template_server.go +++ b/cmd/server/workflowtemplate/workflow_template_server.go @@ -26,7 +26,6 @@ type WorkflowTemplateServer struct { wfClientset *versioned.Clientset kubeClientset *kubernetes.Clientset enableClientAuth bool - config *config.WorkflowControllerConfig } func NewWorkflowTemplateServer(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset, config *config.WorkflowControllerConfig, enableClientAuth bool) *WorkflowTemplateServer { From 520844ba30bbc4719902a031c64505eff09770e6 Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Tue, 17 Dec 2019 04:35:07 -0800 Subject: [PATCH 020/421] cache --- .circleci/config.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 21ccb4a21e75..d87a19559676 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -8,13 +8,14 @@ commands: - restore_cache: name: Restore vendor cache keys: + - vendor-v2-{{ checksum "Gopkg.lock" }}-{{ .Environment.CIRCLE_JOB }} - vendor-v1-{{ checksum "Gopkg.lock" }}-{{ .Environment.CIRCLE_JOB }} - run: name: Ensure dependencies command: dep ensure -v - save_cache: name: Save vendor cache - key: vendor-v1-{{ checksum "Gopkg.lock" }}-{{ .Environment.CIRCLE_JOB }} + key: vendor-v2-{{ checksum "Gopkg.lock" }}-{{ .Environment.CIRCLE_JOB }} paths: - vendor install_golang: @@ -30,7 +31,7 @@ commands: steps: - save_cache: name: Save Golang cache - key: go-v1-{{ .Branch }}-{{ .Environment.CIRCLE_JOB }} + key: go-v2-{{ .Branch }}-{{ .Environment.CIRCLE_JOB }} # https://circleci.com/docs/2.0/language-go/ paths: - /home/circleci/.cache/go-build @@ -40,6 +41,8 @@ 
commands: - restore_cache: name: Restore Golang cache keys: + - go-v2-{{ .Branch }}-{{ .Environment.CIRCLE_JOB }} + - go-v2-master-{{ .Environment.CIRCLE_JOB }} - go-v1-{{ .Branch }}-{{ .Environment.CIRCLE_JOB }} - go-v1-master-{{ .Environment.CIRCLE_JOB }} jobs: From 2275984dc4d28cb794e6968789c9953b0f9cea4c Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Tue, 17 Dec 2019 04:41:45 -0800 Subject: [PATCH 021/421] probe --- .circleci/config.yml | 2 +- .../argo-server/argo-server-deployment.yaml | 25 +++++++++---------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index d87a19559676..7d47efcb892d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -76,7 +76,7 @@ jobs: cp /etc/rancher/k3s/k3s.yaml ~/.kube/config - run: KUBECONFIG=~/.kube/config make start-e2e - run: - name: Follow controller logs, to help diagnose failures + name: Follow logs, to help diagnose failures command: | sleep 10 make logs-e2e diff --git a/manifests/base/argo-server/argo-server-deployment.yaml b/manifests/base/argo-server/argo-server-deployment.yaml index 0a9ece5afcc3..cf1753b57ddc 100644 --- a/manifests/base/argo-server/argo-server-deployment.yaml +++ b/manifests/base/argo-server/argo-server-deployment.yaml @@ -18,16 +18,15 @@ spec: ports: - containerPort: 2746 hostPort: 2746 - # TODO - need to fix this - #readinessProbe: - # httpGet: - # path: /api/v1/workflows/argo - # port: 2746 - # initialDelaySeconds: 5 - # periodSeconds: 10 - #livenessProbe: - # httpGet: - # path: /api/v1/workflows/argo - # port: 2746 - # initialDelaySeconds: 5 - # periodSeconds: 10 \ No newline at end of file + readinessProbe: + httpGet: + path: /api/v1/workflows/argo + port: 2746 + initialDelaySeconds: 5 + periodSeconds: 10 + livenessProbe: + httpGet: + path: /api/v1/workflows/argo + port: 2746 + initialDelaySeconds: 5 + periodSeconds: 10 \ No newline at end of file From 51ce066b2c9ce56fffec593f13699af261927ed1 Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Tue, 17 Dec 2019 05:06:27 -0800 Subject: [PATCH 022/421] nodePort --- Makefile | 16 +++++----------- test/e2e/argo_server_test.go | 2 +- 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index ba370d3b26cc..1a6353363637 100644 --- a/Makefile +++ b/Makefile @@ -211,12 +211,11 @@ start-e2e: kubectl -n argo scale deployment/workflow-controller --replicas 0 kubectl -n argo scale deployment/argo-server --replicas 0 # Change to use a "dev" tag and enable debug logging. 
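# Each --patch below is an RFC 6902 JSON Patch: an ordered array of
# {"op", "path", "value"} operations. "replace" overwrites the value at the
# given JSON-pointer path (e.g. /spec/template/spec/containers/0/image is
# the first container's image), while "add" may create a field that does
# not yet exist, as in the NodePort service patches.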
- kubectl -n argo patch deployment/workflow-controller --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/imagePullPolicy", "value": "Never"}]' - kubectl -n argo patch deployment/workflow-controller --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "argoproj/workflow-controller:dev"}]' - kubectl -n argo patch deployment/workflow-controller --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": ["--loglevel", "debug", "--executor-image", "argoproj/argoexec:dev", "--executor-image-pull-policy", "Never"]}]' - kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/imagePullPolicy", "value": "Never"}]' - kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "argoproj/argo-server:dev"}]' - kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": ["--loglevel", "debug", "--insecure"]}]' + kubectl -n argo patch deployment/workflow-controller --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/imagePullPolicy", "value": "Never"}, {"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "argoproj/workflow-controller:dev"}, {"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": ["--loglevel", "debug", "--executor-image", "argoproj/argoexec:dev", "--executor-image-pull-policy", "Never"]}]' + kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/imagePullPolicy", "value": "Never"}, {"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "argoproj/argo-server:dev"}, {"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": ["--loglevel", "debug", "--insecure"]}]' + kubectl -n argo patch svc/argo-ui --type json --patch '[{"op": "add", "path": "/spec/ports/0/nodePort", "value": 30001}, {"op": "add", "path": "/spec/type", "value": "NodePort"}]' + kubectl -n argo patch svc/minio --type json --patch '[{"op": "add", "path": "/spec/ports/0/nodePort", "value": 30000}, {"op": "add", "path": "/spec/type", "value": "NodePort"}]' + kubectl -n argo patch svc/argo-server --type json --patch '[{"op": "add", "path": "/spec/ports/0/nodePort", "value": 32746}, {"op": "add", "path": "/spec/type", "value": "NodePort"}]' # Install MinIO and set-up config-map. kubectl -n argo apply --wait --force -f test/e2e/manifests # Build controller and executor images. @@ -228,11 +227,6 @@ start-e2e: kubectl -n argo wait --for=condition=Ready pod --all -l app=workflow-controller kubectl -n argo wait --for=condition=Ready pod --all -l app=argo-server kubectl -n argo wait --for=condition=Ready pod --all -l app=minio - # Set-up port-forwards - killall kubectl || true - kubectl -n argo port-forward deployment/argo-ui 8001:8001 & - kubectl -n argo port-forward svc/minio 9000:9000 & - kubectl -n argo port-forward svc/argo-server 2746:2746 & # Switch to "argo" ns. kubectl config set-context --current --namespace=argo # Pull whalesay. This is used a lot in the tests, so good to have it ready now. 
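
The service patches above pin well-known node ports (minio 30000, argo-ui 30001, argo-server 32746), which is why the test below switches baseUrl to port 32746. For reference, a sketch of the argo-server Service spec after patching (assuming its existing service port is 2746, as the port-forward target elsewhere in the Makefile suggests; node ports must fall inside the cluster's service-node-port-range, which defaults to 30000-32767):

spec:
  type: NodePort
  ports:
  - port: 2746
    nodePort: 32746
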
diff --git a/test/e2e/argo_server_test.go b/test/e2e/argo_server_test.go index 2d545f4e52fc..9752fbd2bd45 100644 --- a/test/e2e/argo_server_test.go +++ b/test/e2e/argo_server_test.go @@ -12,7 +12,7 @@ import ( "testing" ) -const baseUrl = "http://localhost:2746/api/v1" +const baseUrl = "http://localhost:32746/api/v1" type ArgoServerSuite struct { fixtures.E2ESuite From 5b8db5c118240c381a0eb3175e6224458e0d35ed Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Tue, 17 Dec 2019 05:29:51 -0800 Subject: [PATCH 023/421] local-ok --- Makefile | 8 ++++--- .../argo-server/argo-server-deployment.yaml | 21 ++++++++----------- test/e2e/argo_server_test.go | 2 +- 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/Makefile b/Makefile index 1a6353363637..fb7356db3983 100644 --- a/Makefile +++ b/Makefile @@ -213,9 +213,6 @@ start-e2e: # Change to use a "dev" tag and enable debug logging. kubectl -n argo patch deployment/workflow-controller --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/imagePullPolicy", "value": "Never"}, {"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "argoproj/workflow-controller:dev"}, {"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": ["--loglevel", "debug", "--executor-image", "argoproj/argoexec:dev", "--executor-image-pull-policy", "Never"]}]' kubectl -n argo patch deployment/argo-server --type json --patch '[{"op": "replace", "path": "/spec/template/spec/containers/0/imagePullPolicy", "value": "Never"}, {"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "argoproj/argo-server:dev"}, {"op": "replace", "path": "/spec/template/spec/containers/0/args", "value": ["--loglevel", "debug", "--insecure"]}]' - kubectl -n argo patch svc/argo-ui --type json --patch '[{"op": "add", "path": "/spec/ports/0/nodePort", "value": 30001}, {"op": "add", "path": "/spec/type", "value": "NodePort"}]' - kubectl -n argo patch svc/minio --type json --patch '[{"op": "add", "path": "/spec/ports/0/nodePort", "value": 30000}, {"op": "add", "path": "/spec/type", "value": "NodePort"}]' - kubectl -n argo patch svc/argo-server --type json --patch '[{"op": "add", "path": "/spec/ports/0/nodePort", "value": 32746}, {"op": "add", "path": "/spec/type", "value": "NodePort"}]' # Install MinIO and set-up config-map. kubectl -n argo apply --wait --force -f test/e2e/manifests # Build controller and executor images. @@ -227,6 +224,11 @@ start-e2e: kubectl -n argo wait --for=condition=Ready pod --all -l app=workflow-controller kubectl -n argo wait --for=condition=Ready pod --all -l app=argo-server kubectl -n argo wait --for=condition=Ready pod --all -l app=minio + # Set-up port-forwards + killall kubectl || true + kubectl -n argo port-forward deployment/argo-ui 8001:8001 & + kubectl -n argo port-forward svc/minio 9000:9000 & + kubectl -n argo port-forward svc/argo-server 2746:2746 & # Switch to "argo" ns. kubectl config set-context --current --namespace=argo # Pull whalesay. This is used a lot in the tests, so good to have it ready now. 
diff --git a/manifests/base/argo-server/argo-server-deployment.yaml b/manifests/base/argo-server/argo-server-deployment.yaml index cf1753b57ddc..0f2d628ab962 100644 --- a/manifests/base/argo-server/argo-server-deployment.yaml +++ b/manifests/base/argo-server/argo-server-deployment.yaml @@ -18,15 +18,12 @@ spec: ports: - containerPort: 2746 hostPort: 2746 - readinessProbe: - httpGet: - path: /api/v1/workflows/argo - port: 2746 - initialDelaySeconds: 5 - periodSeconds: 10 - livenessProbe: - httpGet: - path: /api/v1/workflows/argo - port: 2746 - initialDelaySeconds: 5 - periodSeconds: 10 \ No newline at end of file + #readinessProbe: + # httpGet: + # path: /api/v1/workflows/argo + # port: 2746 + # httpHeaders: + # - name: Accept + # value: "*/*" + # initialDelaySeconds: 5 + # periodSeconds: 10 diff --git a/test/e2e/argo_server_test.go b/test/e2e/argo_server_test.go index 9752fbd2bd45..2d545f4e52fc 100644 --- a/test/e2e/argo_server_test.go +++ b/test/e2e/argo_server_test.go @@ -12,7 +12,7 @@ import ( "testing" ) -const baseUrl = "http://localhost:32746/api/v1" +const baseUrl = "http://localhost:2746/api/v1" type ArgoServerSuite struct { fixtures.E2ESuite From 09db6d227cd2eca74b424da934189c89bfe5b6fc Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Tue, 17 Dec 2019 05:32:05 -0800 Subject: [PATCH 024/421] delete-options --- test/e2e/fixtures/e2e_suite.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/fixtures/e2e_suite.go b/test/e2e/fixtures/e2e_suite.go index 1808e5478aa2..d30dae466870 100644 --- a/test/e2e/fixtures/e2e_suite.go +++ b/test/e2e/fixtures/e2e_suite.go @@ -63,7 +63,7 @@ func (s *E2ESuite) BeforeTest(_, _ string) { for _, wf := range list.Items { logCtx := log.WithFields(log.Fields{"test": s.T().Name(), "workflow": wf.Name}) logCtx.Infof("Deleting workflow") - err = s.client.Delete(wf.Name, nil) + err = s.client.Delete(wf.Name, &metav1.DeleteOptions{}) if err != nil { panic(err) } From deec313dc208673ca5c67ed440de87eda333f86b Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Tue, 17 Dec 2019 05:33:29 -0800 Subject: [PATCH 025/421] port-forwards --- .circleci/config.yml | 4 ++++ Makefile | 12 ++++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 7d47efcb892d..37ce9d7ad43b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -75,6 +75,10 @@ jobs: mkdir -p ~/.kube cp /etc/rancher/k3s/k3s.yaml ~/.kube/config - run: KUBECONFIG=~/.kube/config make start-e2e + - run: + name: Re-establish port forward + command: make port-forward-e2e + background: true - run: name: Follow logs, to help diagnose failures command: | diff --git a/Makefile b/Makefile index fb7356db3983..f61db88a6b3f 100644 --- a/Makefile +++ b/Makefile @@ -225,15 +225,19 @@ start-e2e: kubectl -n argo wait --for=condition=Ready pod --all -l app=argo-server kubectl -n argo wait --for=condition=Ready pod --all -l app=minio # Set-up port-forwards - killall kubectl || true - kubectl -n argo port-forward deployment/argo-ui 8001:8001 & - kubectl -n argo port-forward svc/minio 9000:9000 & - kubectl -n argo port-forward svc/argo-server 2746:2746 & + make port-forward-e2e # Switch to "argo" ns. kubectl config set-context --current --namespace=argo # Pull whalesay. This is used a lot in the tests, so good to have it ready now. 
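# A note on the port-forward-e2e target defined just below: kubectl
# port-forward blocks in the foreground, so each forward is backgrounded
# with '&', and 'killall kubectl || true' first clears any stale forwards
# ('|| true' keeps make from failing when none are running). The CircleCI
# change above re-runs the same target as a background step so the
# forwards survive into the test run.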
docker pull docker/whalesay:latest +.PHONY: port-forward-e2e +port-forward-e2e: + killall kubectl || true + kubectl -n argo port-forward deployment/argo-ui 8001:8001 & + kubectl -n argo port-forward svc/minio 9000:9000 & + kubectl -n argo port-forward svc/argo-server 2746:2746 & + .PHONY: logs-e2e logs-e2e: kubectl -n argo logs -f -l app From 90bbfde2ff0abbee985a79c1d13854a74891fe08 Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Tue, 17 Dec 2019 05:44:46 -0800 Subject: [PATCH 026/421] delete --- .circleci/config.yml | 2 +- cmd/server/workflow/workflow_server.go | 109 +++----------- cmd/server/workflow/workflow_service.go | 185 ------------------------ 3 files changed, 23 insertions(+), 273 deletions(-) delete mode 100644 cmd/server/workflow/workflow_service.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 37ce9d7ad43b..53ab91df0a71 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -77,7 +77,7 @@ jobs: - run: KUBECONFIG=~/.kube/config make start-e2e - run: name: Re-establish port forward - command: make port-forward-e2e + command: make port-forward-e2e && wait background: true - run: name: Follow logs, to help diagnose failures diff --git a/cmd/server/workflow/workflow_server.go b/cmd/server/workflow/workflow_server.go index 28d190bdd525..d49e669ab37e 100644 --- a/cmd/server/workflow/workflow_server.go +++ b/cmd/server/workflow/workflow_server.go @@ -5,9 +5,10 @@ import ( "encoding/json" "errors" "fmt" - "github.com/argoproj/argo/workflow/templateresolution" "strings" + "github.com/argoproj/argo/workflow/templateresolution" + log "github.com/sirupsen/logrus" "golang.org/x/net/context" "google.golang.org/grpc/metadata" @@ -27,21 +28,17 @@ import ( ) type WorkflowServer struct { - namespace string wfClientset *versioned.Clientset kubeClientset *kubernetes.Clientset enableClientAuth bool wfDBService *DBService - wfKubeService *KubeService } func NewWorkflowServer(namespace string, wfClientset *versioned.Clientset, kubeClientSet *kubernetes.Clientset, config *config.WorkflowControllerConfig, enableClientAuth bool) *WorkflowServer { wfServer := WorkflowServer{ - namespace: namespace, wfClientset: wfClientset, kubeClientset: kubeClientSet, enableClientAuth: enableClientAuth, - wfKubeService: NewKubeServer(namespace, wfClientset, kubeClientSet, enableClientAuth), } if config != nil && config.Persistence != nil { var err error @@ -118,17 +115,11 @@ func (s *WorkflowServer) Create(ctx context.Context, wfReq *WorkflowCreateReques if err != nil { return nil, err } - namespace := s.namespace - if wfReq.Namespace != "" { - namespace = wfReq.Namespace - } if wfReq.Workflow == nil { return nil, fmt.Errorf("workflow body not specified") } - wfReq.Workflow.Namespace = namespace - - wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(namespace)) + wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(wfReq.Namespace)) err = validate.ValidateWorkflow(wftmplGetter, wfReq.Workflow, validate.ValidateOpts{}) if err != nil { @@ -136,8 +127,7 @@ func (s *WorkflowServer) Create(ctx context.Context, wfReq *WorkflowCreateReques } // TODO server dry-run - wf, err := s.wfKubeService.Create(wfClient, wfReq) - + wf, err := wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace).Create(wfReq.Workflow) if err != nil { log.Errorf("Create request is failed. 
Error: %s", err) return nil, err @@ -153,16 +143,10 @@ func (s *WorkflowServer) Get(ctx context.Context, wfReq *WorkflowGetRequest) (*v } var wf *v1alpha1.Workflow - namespace := s.namespace - if wfReq.Namespace != "" { - namespace = wfReq.Namespace - } - if s.wfDBService != nil { wf, err = s.wfDBService.Get(wfReq.WorkflowName, wfReq.Namespace) } else { - - wf, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) + wf, err = wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) } if err != nil { return nil, err @@ -178,12 +162,7 @@ func (s *WorkflowServer) List(ctx context.Context, wfReq *WorkflowListRequest) ( } var wfList *v1alpha1.WorkflowList - var listOption v1.ListOptions = v1.ListOptions{} - namespace := s.namespace - if wfReq.Namespace != "" { - namespace = wfReq.Namespace - } - + var listOption = v1.ListOptions{} if wfReq.ListOptions != nil { listOption = *wfReq.ListOptions } @@ -193,10 +172,9 @@ func (s *WorkflowServer) List(ctx context.Context, wfReq *WorkflowListRequest) ( if wfReq.ListOptions != nil { pagesize = uint(wfReq.ListOptions.Limit) } - - wfList, err = s.wfDBService.List(namespace, pagesize, "") + wfList, err = s.wfDBService.List(wfReq.Namespace, pagesize, "") } else { - wfList, err = wfClient.ArgoprojV1alpha1().Workflows(namespace).List(listOption) + wfList, err = wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace).List(listOption) } if err != nil { return nil, err @@ -211,11 +189,6 @@ func (s *WorkflowServer) Delete(ctx context.Context, wfReq *WorkflowDeleteReques return nil, err } - namespace := s.namespace - if wfReq.Namespace != "" { - namespace = wfReq.Namespace - } - if s.wfDBService != nil { err = s.wfDBService.Delete(wfReq.WorkflowName, wfReq.Namespace) if err != nil { @@ -223,7 +196,7 @@ func (s *WorkflowServer) Delete(ctx context.Context, wfReq *WorkflowDeleteReques } } - err = wfClient.ArgoprojV1alpha1().Workflows(namespace).Delete(wfReq.WorkflowName, &v1.DeleteOptions{}) + err = wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace).Delete(wfReq.WorkflowName, &v1.DeleteOptions{}) if err != nil { return nil, err } @@ -235,23 +208,18 @@ func (s *WorkflowServer) Delete(ctx context.Context, wfReq *WorkflowDeleteReques } func (s *WorkflowServer) Retry(ctx context.Context, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - namespace := s.namespace - if wfReq.Namespace != "" { - namespace = wfReq.Namespace - } - wfClient, kubeClient, err := s.GetWFClient(ctx) if err != nil { return nil, err } - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } - wf, err = util.RetryWorkflow(kubeClient, wfClient.ArgoprojV1alpha1().Workflows(namespace), wf) + wf, err = util.RetryWorkflow(kubeClient, wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace), wf) if err != nil { return nil, err @@ -265,12 +233,7 @@ func (s *WorkflowServer) Resubmit(ctx context.Context, in *WorkflowUpdateRequest return nil, err } - namespace := s.namespace - if in.Namespace != "" { - namespace = in.Namespace - } - - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(in.WorkflowName, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(in.Namespace).Get(in.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } @@ -278,7 +241,7 @@ func (s *WorkflowServer) Resubmit(ctx 
context.Context, in *WorkflowUpdateRequest if err != nil { return nil, err } - created, err := util.SubmitWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfClient, namespace, newWF, nil) + created, err := util.SubmitWorkflow(wfClient.ArgoprojV1alpha1().Workflows(in.Namespace), wfClient, in.Namespace, newWF, nil) if err != nil { return nil, err } @@ -292,18 +255,13 @@ func (s *WorkflowServer) Resume(ctx context.Context, wfReq *WorkflowUpdateReques return nil, err } - namespace := s.namespace - if wfReq.Namespace != "" { - namespace = wfReq.Namespace - } - - err = util.ResumeWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfReq.WorkflowName) + err = util.ResumeWorkflow(wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace), wfReq.WorkflowName) if err != nil { log.Warnf("Failed to resume '%s': %s", wfReq.WorkflowName, err) return nil, err } - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } @@ -317,17 +275,12 @@ func (s *WorkflowServer) Suspend(ctx context.Context, wfReq *WorkflowUpdateReque return nil, err } - namespace := s.namespace - if wfReq.Namespace != "" { - namespace = wfReq.Namespace - } - - err = util.SuspendWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfReq.WorkflowName) + err = util.SuspendWorkflow(wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace), wfReq.WorkflowName) if err != nil { return nil, err } - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } @@ -341,17 +294,12 @@ func (s *WorkflowServer) Terminate(ctx context.Context, wfReq *WorkflowUpdateReq return nil, err } - namespace := s.namespace - if wfReq.Namespace != "" { - namespace = wfReq.Namespace - } - - err = util.TerminateWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfReq.WorkflowName) + err = util.TerminateWorkflow(wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace), wfReq.WorkflowName) if err != nil { return nil, err } - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) + wf, err := wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) if err != nil { return nil, err } @@ -365,11 +313,7 @@ func (s *WorkflowServer) Lint(ctx context.Context, wfReq *WorkflowCreateRequest) return nil, err } - namespace := s.namespace - if wfReq.Namespace != "" { - namespace = wfReq.Namespace - } - wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(namespace)) + wftmplGetter := templateresolution.WrapWorkflowTemplateInterface(wfClient.ArgoprojV1alpha1().WorkflowTemplates(wfReq.Namespace)) err = validate.ValidateWorkflow(wftmplGetter, wfReq.Workflow, validate.ValidateOpts{}) if err != nil { @@ -385,12 +329,7 @@ func (s *WorkflowServer) Watch(wfReq *WorkflowGetRequest, ws WorkflowService_Wat return err } - namespace := s.namespace - if wfReq.Namespace != "" { - namespace = wfReq.Namespace - } - - wfs, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Watch(v1.ListOptions{}) + wfs, err := wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace).Watch(v1.ListOptions{}) if err != nil { return err } @@ -425,16 +364,12 @@ func (s *WorkflowServer) 
PodLogs(wfReq *WorkflowLogRequest, log WorkflowService_ return err } - namespace := s.namespace - if wfReq.Namespace != "" { - namespace = wfReq.Namespace - } containerName := "main" if wfReq.Container != "" { containerName = wfReq.Container } - stream, err := kubeClient.CoreV1().Pods(namespace).GetLogs(wfReq.PodName, &corev1.PodLogOptions{ + stream, err := kubeClient.CoreV1().Pods(wfReq.Namespace).GetLogs(wfReq.PodName, &corev1.PodLogOptions{ Container: containerName, Follow: wfReq.LogOptions.Follow, Timestamps: true, diff --git a/cmd/server/workflow/workflow_service.go b/cmd/server/workflow/workflow_service.go deleted file mode 100644 index 1a07aeae20d3..000000000000 --- a/cmd/server/workflow/workflow_service.go +++ /dev/null @@ -1,185 +0,0 @@ -package workflow - -import ( - "encoding/json" - "errors" - log "github.com/sirupsen/logrus" - "golang.org/x/net/context" - "google.golang.org/grpc/metadata" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - - "github.com/argoproj/argo/cmd/server/common" - "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1" - "github.com/argoproj/argo/pkg/client/clientset/versioned" - wfclientset "github.com/argoproj/argo/pkg/client/clientset/versioned" - "github.com/argoproj/argo/workflow/util" -) - -type KubeService struct { - namespace string - wfClientset *versioned.Clientset - kubeClientset *kubernetes.Clientset - enableClientAuth bool -} - -func NewKubeServer(Namespace string, wfClientset *wfclientset.Clientset, kubeClientSet *kubernetes.Clientset, enableClientAuth bool) *KubeService { - return &KubeService{ - namespace: Namespace, - wfClientset: wfClientset, - kubeClientset: kubeClientSet, - enableClientAuth: enableClientAuth, - } -} - -func (s *KubeService) GetWFClient(ctx context.Context) (*versioned.Clientset, *kubernetes.Clientset, error) { - md, _ := metadata.FromIncomingContext(ctx) - - if s.enableClientAuth { - return s.wfClientset, s.kubeClientset, nil - } - - var restConfigStr, bearerToken string - if len(md.Get(common.CLIENT_REST_CONFIG)) == 0 { - return nil, nil, errors.New("Client kubeconfig is not found") - } - restConfigStr = md.Get(common.CLIENT_REST_CONFIG)[0] - - if len(md.Get(common.AUTH_TOKEN)) > 0 { - bearerToken = md.Get(common.AUTH_TOKEN)[0] - } - - restConfig := rest.Config{} - - err := json.Unmarshal([]byte(restConfigStr), &restConfig) - if err != nil { - return nil, nil, err - } - - restConfig.BearerToken = bearerToken - - wfClientset, err := wfclientset.NewForConfig(&restConfig) - if err != nil { - log.Errorf("Failure to create wfClientset with ClientConfig '%+v': %s", restConfig, err) - return nil, nil, err - } - - clientset, err := kubernetes.NewForConfig(&restConfig) - if err != nil { - log.Errorf("Failure to create kubeClientset with ClientConfig '%+v': %s", restConfig, err) - return nil, nil, err - } - - return wfClientset, clientset, nil -} - -func (s *KubeService) Create(wfClient *versioned.Clientset, wfReq *WorkflowCreateRequest) (*v1alpha1.Workflow, error) { - createdWf, err := wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace).Create(wfReq.Workflow) - if err != nil { - log.Warnf("Create request is failed. Error: %s", err) - return nil, err - } - - log.Infof("Workflow created successfully. 
Name: %s", createdWf.Name) - return createdWf, nil -} - -func (s *KubeService) Get(wfClient *versioned.Clientset, wfReq *WorkflowGetRequest) (*v1alpha1.Workflow, error) { - wf, err := wfClient.ArgoprojV1alpha1().Workflows(wfReq.Namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) - if err != nil { - return nil, err - } - return wf, err -} - -func (s *KubeService) List(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowListRequest) (*v1alpha1.WorkflowList, error) { - wfList, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).List(v1.ListOptions{}) - if err != nil { - return nil, err - } - return wfList, nil -} - -func (s *KubeService) Delete(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowDeleteRequest) (*WorkflowDeleteResponse, error) { - err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Delete(wfReq.WorkflowName, &v1.DeleteOptions{}) - if err != nil { - return nil, err - } - return nil, nil -} - -func (s *KubeService) Retry(wfClient *versioned.Clientset, kubeClient *kubernetes.Clientset, namespace string, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) - if err != nil { - return nil, err - } - - wf, err = util.RetryWorkflow(kubeClient, wfClient.ArgoprojV1alpha1().Workflows(namespace), wf) - if err != nil { - return nil, err - } - return wf, err -} - -func (s *KubeService) Resubmit(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) - if err != nil { - return nil, err - } - - newWF, err := util.FormulateResubmitWorkflow(wf, wfReq.Memoized) - if err != nil { - return nil, err - } - - created, err := util.SubmitWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfClient, namespace, newWF, nil) - if err != nil { - return nil, err - } - - return created, err -} - -func (s *KubeService) Resume(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - err := util.ResumeWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfReq.WorkflowName) - if err != nil { - log.Warnf("Failed to resume %s: %+v", wfReq.WorkflowName, err) - return nil, err - } - - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) - if err != nil { - return nil, err - } - - return wf, nil -} - -func (s *KubeService) Suspend(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - err := util.SuspendWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfReq.WorkflowName) - if err != nil { - return nil, err - } - - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) - if err != nil { - return nil, err - } - - return wf, nil -} - -func (s *KubeService) Terminate(wfClient *versioned.Clientset, namespace string, wfReq *WorkflowUpdateRequest) (*v1alpha1.Workflow, error) { - err := util.TerminateWorkflow(wfClient.ArgoprojV1alpha1().Workflows(namespace), wfReq.WorkflowName) - if err != nil { - return nil, err - } - - wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(wfReq.WorkflowName, v1.GetOptions{}) - if err != nil { - return nil, err - } - - return wf, nil -} From 848c91081e8a39d3f24465d27f5574ea75858471 Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Tue, 17 Dec 2019 05:59:31 -0800 
Subject: [PATCH 027/421] always --- .circleci/config.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 53ab91df0a71..f5089f02cdf3 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,6 @@ commands: name: Restore vendor cache keys: - vendor-v2-{{ checksum "Gopkg.lock" }}-{{ .Environment.CIRCLE_JOB }} - - vendor-v1-{{ checksum "Gopkg.lock" }}-{{ .Environment.CIRCLE_JOB }} - run: name: Ensure dependencies command: dep ensure -v @@ -43,8 +42,6 @@ commands: keys: - go-v2-{{ .Branch }}-{{ .Environment.CIRCLE_JOB }} - go-v2-master-{{ .Environment.CIRCLE_JOB }} - - go-v1-{{ .Branch }}-{{ .Environment.CIRCLE_JOB }} - - go-v1-master-{{ .Environment.CIRCLE_JOB }} jobs: e2e: working_directory: /home/circleci/.go_workspace/src/github.com/argoproj/argo @@ -99,7 +96,8 @@ jobs: mkdir -p test-results trap 'go-junit-report < test-results/test.out > test-results/junit.xml' EXIT make test-e2e 2>&1 | tee test-results/test.out - - save_go_cache + - save_go_cache: + when: always - store_test_results: path: test-results - store_artifacts: From 8a5f2ce107527922f33897c0339b8c022b7ef3c8 Mon Sep 17 00:00:00 2001 From: Alex Collins Date: Tue, 17 Dec 2019 05:59:52 -0800 Subject: [PATCH 028/421] sleep --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index f5089f02cdf3..cd9826944d91 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -74,7 +74,7 @@ jobs: - run: KUBECONFIG=~/.kube/config make start-e2e - run: name: Re-establish port forward - command: make port-forward-e2e && wait + command: make port-forward-e2e && sleep 3 && wait background: true - run: name: Follow logs, to help diagnose failures From 8a152801e857a8f8f4650e99c488f65273c0016f Mon Sep 17 00:00:00 2001 From: Simon Behar Date: Tue, 17 Dec 2019 09:16:20 -0800 Subject: [PATCH 029/421] Added linter --- ui/package.json | 3 +++ ui/tslint.json | 3 ++- ui/yarn.lock | 50 +++++++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 51 insertions(+), 5 deletions(-) diff --git a/ui/package.json b/ui/package.json index 1e0bd4243a78..68d871d9aa03 100644 --- a/ui/package.json +++ b/ui/package.json @@ -37,6 +37,7 @@ "json-stream": "^1.0.0", "kubernetes-client": "3.17.1", "moment": "^2.20.1", + "prettier": "^1.19.1", "prop-types": "^15.6.0", "react": "^16.8.3", "react-dom": "^16.8.3", @@ -48,6 +49,8 @@ "superagent": "^3.8.2", "superagent-promise": "^1.1.0", "ts-loader": "^6.0.4", + "tslint-config-prettier": "^1.18.0", + "tslint-plugin-prettier": "^2.0.1", "typescript": "^2.8.3", "util.promisify": "^1.0.0", "webpack-cli": "^3.3.5", diff --git a/ui/tslint.json b/ui/tslint.json index a2472b5670e1..13b94501c2af 100644 --- a/ui/tslint.json +++ b/ui/tslint.json @@ -1,9 +1,10 @@ { "extends": [ - "tslint:recommended", "tslint-react" + "tslint:recommended", "tslint-react", "tslint-plugin-prettier", "tslint-config-prettier" ], "jsRules": {}, "rules": { + "prettier": true, "quotemark": [true, "single"], "no-var-requires": false, "interface-name": false, diff --git a/ui/yarn.lock b/ui/yarn.lock index 2267b0a8a07d..0e1747c0b7c7 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -4537,6 +4537,14 @@ escape-string-regexp@1.0.5, escape-string-regexp@^1.0.2, escape-string-regexp@^1 version "1.0.5" resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" +eslint-plugin-prettier@^2.2.0: + version "2.7.0" + resolved 
"https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-2.7.0.tgz#b4312dcf2c1d965379d7f9d5b5f8aaadc6a45904" + integrity sha512-CStQYJgALoQBw3FsBzH0VOVDRnJ/ZimUlpLm226U8qgqYJfPOY/CPK6wyRInMxh73HSKg5wyRwdS4BVYYHwokA== + dependencies: + fast-diff "^1.1.1" + jest-docblock "^21.0.0" + eslint-scope@^4.0.0: version "4.0.3" resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-4.0.3.tgz#ca03833310f6889a3264781aa82e63eb9cfe7848" @@ -4798,6 +4806,11 @@ fast-deep-equal@^2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz#7b05218ddf9667bf7f370bf7fdb2cb15fdd0aa49" +fast-diff@^1.1.1: + version "1.2.0" + resolved "https://registry.yarnpkg.com/fast-diff/-/fast-diff-1.2.0.tgz#73ee11982d86caaf7959828d519cfe927fac5f03" + integrity sha512-xJuoT5+L99XlZ8twedaRf6Ax2TgQVxvgZOYoPKqZufmJib0tL2tegPBOZb1pVNgIhlqDlA0eO0c3wBvQcmzx4w== + fast-glob@^2.0.2: version "2.2.7" resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-2.2.7.tgz#6953857c3afa475fff92ee6015d52da70a4cd39d" @@ -6448,6 +6461,11 @@ isstream@0.1.x, isstream@~0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/isstream/-/isstream-0.1.2.tgz#47e63f7af55afa6f92e1500e690eb8b8529c099a" +jest-docblock@^21.0.0: + version "21.2.0" + resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-21.2.0.tgz#51529c3b30d5fd159da60c27ceedc195faf8d414" + integrity sha512-5IZ7sY9dBAYSV+YjQ0Ovb540Ku7AO9Z5o2Cg789xj167iQuZ2cG+z0f3Uct6WeYLbU6aQiM2pCs7sZ+4dotydw== + jmespath@0.15.0: version "0.15.0" resolved "https://registry.yarnpkg.com/jmespath/-/jmespath-0.15.0.tgz#a3f222a9aae9f966f5d27c796510e28091764217" @@ -6743,6 +6761,11 @@ lcid@^2.0.0: dependencies: invert-kv "^2.0.0" +lines-and-columns@^1.1.6: + version "1.1.6" + resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.1.6.tgz#1c00c743b433cd0a4e80758f7b64a57440d9ff00" + integrity sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA= + load-json-file@^1.0.0: version "1.1.0" resolved "https://registry.yarnpkg.com/load-json-file/-/load-json-file-1.1.0.tgz#956905708d58b4bab4c2261b04f59f31c99374c0" @@ -8324,6 +8347,11 @@ prepend-http@^1.0.0, prepend-http@^1.0.1: version "1.0.4" resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc" +prettier@^1.19.1: + version "1.19.1" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-1.19.1.tgz#f7d7f5ff8a9cd872a7be4ca142095956a60797cb" + integrity sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew== + pretty-error@^2.0.2, pretty-error@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/pretty-error/-/pretty-error-2.1.1.tgz#5f4f87c8f91e5ae3f3ba87ab4cf5e03b1a17f1a3" @@ -10657,14 +10685,28 @@ tsconfig@^7.0.0: strip-bom "^3.0.0" strip-json-comments "^2.0.0" +tslib@^1.7.1, tslib@^1.9.0: + version "1.10.0" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.10.0.tgz#c3c19f95973fb0a62973fb09d90d961ee43e5c8a" + integrity sha512-qOebF53frne81cf0S9B41ByenJ3/IuH8yJKngAX35CmiZySA0khhkovshKK+jGCaMnVomla7gVlIcc3EvKPbTQ== + tslib@^1.8.0, tslib@^1.8.1: version "1.9.3" resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.9.3.tgz#d7e4dd79245d85428c4d7e4822a79917954ca286" -tslib@^1.9.0: - version "1.10.0" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.10.0.tgz#c3c19f95973fb0a62973fb09d90d961ee43e5c8a" - integrity sha512-qOebF53frne81cf0S9B41ByenJ3/IuH8yJKngAX35CmiZySA0khhkovshKK+jGCaMnVomla7gVlIcc3EvKPbTQ== +tslint-config-prettier@^1.18.0: + version 
"1.18.0" + resolved "https://registry.yarnpkg.com/tslint-config-prettier/-/tslint-config-prettier-1.18.0.tgz#75f140bde947d35d8f0d238e0ebf809d64592c37" + integrity sha512-xPw9PgNPLG3iKRxmK7DWr+Ea/SzrvfHtjFt5LBl61gk2UBG/DB9kCXRjv+xyIU1rUtnayLeMUVJBcMX8Z17nDg== + +tslint-plugin-prettier@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/tslint-plugin-prettier/-/tslint-plugin-prettier-2.0.1.tgz#95b6a3b766622ffc44375825d7760225c50c3680" + integrity sha512-4FX9JIx/1rKHIPJNfMb+ooX1gPk5Vg3vNi7+dyFYpLO+O57F4g+b/fo1+W/G0SUOkBLHB/YKScxjX/P+7ZT/Tw== + dependencies: + eslint-plugin-prettier "^2.2.0" + lines-and-columns "^1.1.6" + tslib "^1.7.1" tslint-react@^3.4.0: version "3.6.0" From 6d80cd12990712b6b06aa8bb18b0d7bf0571fc5f Mon Sep 17 00:00:00 2001 From: Simon Behar Date: Tue, 17 Dec 2019 09:24:09 -0800 Subject: [PATCH 030/421] Formated UI code --- ui/.prettierrc | 9 + ui/package.json | 4 +- ui/src/api/app.ts | 154 ++++++++------- ui/src/api/console-proxy.ts | 86 +++++---- ui/src/api/main.ts | 14 +- ui/src/api/utils.ts | 30 +-- ui/src/app/app.tsx | 74 ++++---- ui/src/app/help/components/help.tsx | 39 ++-- ui/src/app/help/index.tsx | 4 +- ui/src/app/index.tsx | 2 +- ui/src/app/shared/base.ts | 2 +- ui/src/app/shared/services/index.ts | 4 +- ui/src/app/shared/services/requests.ts | 20 +- .../app/shared/services/workflows-service.ts | 33 ++-- ui/src/app/shared/utils.spec.ts | 4 +- ui/src/app/shared/utils.ts | 16 +- .../components/workflow-artifacts.tsx | 43 +++-- .../components/workflow-dag/workflow-dag.tsx | 87 +++++---- .../workflow-details/workflow-details.tsx | 149 +++++++-------- .../workflow-list-item/workflow-list-item.tsx | 16 +- .../workflow-node-info/workflow-node-info.tsx | 176 +++++++++++------- .../components/workflow-parameters-panel.tsx | 10 +- .../workflow-steps/workflow-steps.tsx | 71 ++++--- .../components/workflow-summary-panel.tsx | 16 +- .../workflow-timeline/workflow-timeline.tsx | 95 +++++----- .../workflow-yaml-viewer.tsx | 120 ++++++------ .../components/workflows-container.tsx | 10 +- .../workflows-list/workflows-list.tsx | 70 +++---- ui/src/app/workflows/index.ts | 4 +- ui/src/models/workflows.ts | 19 +- 30 files changed, 760 insertions(+), 621 deletions(-) create mode 100644 ui/.prettierrc diff --git a/ui/.prettierrc b/ui/.prettierrc new file mode 100644 index 000000000000..0abdae70b50d --- /dev/null +++ b/ui/.prettierrc @@ -0,0 +1,9 @@ +{ + "bracketSpacing": false, + "jsxSingleQuote": true, + "printWidth": 180, + "singleQuote": true, + "tabWidth": 4, + "jsxBracketSameLine": true, + "quoteProps": "consistent" +} diff --git a/ui/package.json b/ui/package.json index 68d871d9aa03..571b0b892f79 100644 --- a/ui/package.json +++ b/ui/package.json @@ -15,8 +15,8 @@ "start:ui": "webpack-dev-server --config ./src/app/webpack.config.js", "start:api": "TS_NODE_PROJECT=./src/api/tsconfig.json nodemon --nolazy --inspect -r ts-node/register ./src/api/main.ts", "lint": "yarn lint:ui && yarn lint:api", - "lint:ui": "tslint -p ./src/app", - "lint:api": "tslint -p ./src/api", + "lint:ui": "tslint --fix -p ./src/app", + "lint:api": "tslint --fix -p ./src/api", "test": "mocha --require ts-node/register ./src/app/**/*.spec.ts" }, "dependencies": { diff --git a/ui/src/api/app.ts b/ui/src/api/app.ts index 352afca3a484..f559278045ce 100644 --- a/ui/src/api/app.ts +++ b/ui/src/api/app.ts @@ -7,7 +7,7 @@ import * as http from 'http'; import * as JSONStream from 'json-stream'; import * as Api from 'kubernetes-client'; import * as path from 'path'; -import { Observable, Observer } from 
'rxjs'; +import {Observable, Observer} from 'rxjs'; import * as nodeStream from 'stream'; import * as promisify from 'util.promisify'; import * as winston from 'winston'; @@ -16,27 +16,26 @@ import * as zlib from 'zlib'; import * as models from '../models/workflows'; import * as consoleProxy from './console-proxy'; -import { decodeBase64, reactifyStringStream, streamServerEvents } from './utils'; +import {decodeBase64, reactifyStringStream, streamServerEvents} from './utils'; const winstonTransport = new winston.transports.Console({ - format: winston.format.combine( - winston.format.timestamp(), - winston.format.simple(), - ), + format: winston.format.combine(winston.format.timestamp(), winston.format.simple()) }); const logger = winston.createLogger({ - transports: [winstonTransport], + transports: [winstonTransport] }); function serve(res: express.Response, action: () => Promise) { - action().then((val) => res.send(val)).catch((err) => { - if (err instanceof Error) { - err = {...err, message: err.message}; - } - res.status(500).send(err); - logger.error(err); - }); + action() + .then(val => res.send(val)) + .catch(err => { + if (err instanceof Error) { + err = {...err, message: err.message}; + } + res.status(500).send(err); + logger.error(err); + }); } function fileToString(filePath: string): Promise { @@ -52,27 +51,29 @@ function fileToString(filePath: string): Promise { } export function create( - uiDist: string, - uiBaseHref: string, - inCluster: boolean, - namespace: string, - forceNamespaceIsolation: boolean, - instanceId: string, - version, - group = 'argoproj.io') { - const config = Object.assign( - {}, inCluster ? Api.config.getInCluster() : Api.config.fromKubeconfig(), {namespace, promises: true }); + uiDist: string, + uiBaseHref: string, + inCluster: boolean, + namespace: string, + forceNamespaceIsolation: boolean, + instanceId: string, + version, + group = 'argoproj.io' +) { + const config = Object.assign({}, inCluster ? Api.config.getInCluster() : Api.config.fromKubeconfig(), {namespace, promises: true}); const core = new Api.Core(config); const crd = new Api.CustomResourceDefinitions(Object.assign(config, {version, group})); crd.addResource('workflows'); const app = express(); app.use(bodyParser.json({type: () => true})); - app.use(expressWinston.logger({ - transports: [winstonTransport], - meta: false, - msg: '{{res.statusCode}} {{req.method}} {{res.responseTime}}ms {{req.url}}', - })); + app.use( + expressWinston.logger({ + transports: [winstonTransport], + meta: false, + msg: '{{res.statusCode}} {{req.method}} {{res.responseTime}}ms {{req.url}}' + }) + ); function getWorkflowLabelSelector(req) { const labelSelector: string[] = []; @@ -88,49 +89,55 @@ export function create( return labelSelector; } - app.get('/api/workflows', (req, res) => serve(res, async () => { - const labelSelector = getWorkflowLabelSelector(req); + app.get('/api/workflows', (req, res) => + serve(res, async () => { + const labelSelector = getWorkflowLabelSelector(req); - const workflowList = await (forceNamespaceIsolation ? crd.ns(namespace) : crd).workflows.get({ - qs: { labelSelector: labelSelector.join(',') }, - }) as models.WorkflowList; + const workflowList = (await (forceNamespaceIsolation ? 
crd.ns(namespace) : crd).workflows.get({ + qs: {labelSelector: labelSelector.join(',')} + })) as models.WorkflowList; - workflowList.items.sort(models.compareWorkflows); - workflowList.items = await Promise.all(workflowList.items.map(deCompressNodes)); - return workflowList; - })); + workflowList.items.sort(models.compareWorkflows); + workflowList.items = await Promise.all(workflowList.items.map(deCompressNodes)); + return workflowList; + }) + ); - app.get('/api/workflows/:namespace/:name', - async (req, res) => serve(res, () => (forceNamespaceIsolation ? crd.ns(namespace) : crd.ns(req.params.namespace)).workflows.get(req.params.name).then((deCompressNodes)))); + app.get('/api/workflows/:namespace/:name', async (req, res) => + serve(res, () => (forceNamespaceIsolation ? crd.ns(namespace) : crd.ns(req.params.namespace)).workflows.get(req.params.name).then(deCompressNodes)) + ); app.get('/api/workflows/live', async (req, res) => { const ns = getNamespace(req); let updatesSource = new Observable((observer: Observer) => { const labelSelector = getWorkflowLabelSelector(req); - let stream = (ns ? crd.ns(ns) : crd).workflows.getStream({ qs: { watch: true, labelSelector: labelSelector.join(',') } }); + let stream = (ns ? crd.ns(ns) : crd).workflows.getStream({qs: {watch: true, labelSelector: labelSelector.join(',')}}); stream.on('end', () => observer.complete()); - stream.on('error', (e) => observer.error(e)); + stream.on('error', e => observer.error(e)); stream.on('close', () => observer.complete()); stream = stream.pipe(new JSONStream()); - stream.on('data', (data) => data && observer.next(data)); - }).flatMap((change) => Observable.fromPromise(deCompressNodes(change.object).then((workflow) => ({...change, object: workflow})))); + stream.on('data', data => data && observer.next(data)); + }).flatMap(change => Observable.fromPromise(deCompressNodes(change.object).then(workflow => ({...change, object: workflow})))); if (ns) { - updatesSource = updatesSource.filter((change) => { + updatesSource = updatesSource.filter(change => { return change.object.metadata.namespace === ns; }); } if (req.query.name) { - updatesSource = updatesSource.filter((change) => change.object.metadata.name === req.query.name); + updatesSource = updatesSource.filter(change => change.object.metadata.name === req.query.name); } - streamServerEvents(req, res, updatesSource, (item) => JSON.stringify(item)); + streamServerEvents(req, res, updatesSource, item => JSON.stringify(item)); }); function getNamespace(req: express.Request) { - return forceNamespaceIsolation ? namespace : (req.query.namespace || req.params.namespace); + return forceNamespaceIsolation ? 
namespace : req.query.namespace || req.params.namespace; } function getWorkflow(ns: string, name: string): Promise { - return crd.ns(ns).workflows.get(name).then(deCompressNodes); + return crd + .ns(ns) + .workflows.get(name) + .then(deCompressNodes); } async function deCompressNodes(workFlow: models.Workflow): Promise { @@ -145,30 +152,38 @@ export function create( } } - function loadNodeArtifact(wf: models.Workflow, nodeId: string, artifactName: string): Promise<{ data: Buffer, fileName: string }> { + function loadNodeArtifact(wf: models.Workflow, nodeId: string, artifactName: string): Promise<{data: Buffer; fileName: string}> { return new Promise(async (resolve, reject) => { const node = wf.status.nodes[nodeId]; - const artifact = node.outputs.artifacts.find((item) => item.name === artifactName); + const artifact = node.outputs.artifacts.find(item => item.name === artifactName); if (artifact.s3) { try { - const secretAccessKey = decodeBase64((await core.ns( - wf.metadata.namespace).secrets.get(artifact.s3.secretKeySecret.name)).data[artifact.s3.secretKeySecret.key]).trim(); - const accessKeyId = decodeBase64((await core.ns( - wf.metadata.namespace).secrets.get(artifact.s3.accessKeySecret.name)).data[artifact.s3.accessKeySecret.key]).trim(); + const secretAccessKey = decodeBase64( + (await core.ns(wf.metadata.namespace).secrets.get(artifact.s3.secretKeySecret.name)).data[artifact.s3.secretKeySecret.key] + ).trim(); + const accessKeyId = decodeBase64( + (await core.ns(wf.metadata.namespace).secrets.get(artifact.s3.accessKeySecret.name)).data[artifact.s3.accessKeySecret.key] + ).trim(); const s3 = new aws.S3({ - region: artifact.s3.region, secretAccessKey, accessKeyId, endpoint: `http://${artifact.s3.endpoint}`, s3ForcePathStyle: true, signatureVersion: 'v4' }); - s3.getObject({ Bucket: artifact.s3.bucket, Key: artifact.s3.key }, (err, data) => { + region: artifact.s3.region, + secretAccessKey, + accessKeyId, + endpoint: `http://${artifact.s3.endpoint}`, + s3ForcePathStyle: true, + signatureVersion: 'v4' + }); + s3.getObject({Bucket: artifact.s3.bucket, Key: artifact.s3.key}, (err, data) => { if (err) { reject(err); } else { - resolve({ data: data.Body as Buffer, fileName: path.basename(artifact.s3.key) }); + resolve({data: data.Body as Buffer, fileName: path.basename(artifact.s3.key)}); } }); } catch (e) { reject(e); } } else { - reject({ code: 'INTERNAL_ERROR', message: 'Artifact source is not supported' }); + reject({code: 'INTERNAL_ERROR', message: 'Artifact source is not supported'}); } }); } @@ -193,13 +208,17 @@ export function create( try { await core.ns(wf.metadata.namespace).pods.get(req.params.nodeId); const logsSource = reactifyStringStream( - core.ns(wf.metadata.namespace).po(req.params.nodeId).log.getStream({ qs: { container: req.params.container, follow: true } })); - streamServerEvents(req, res, logsSource, (item) => item.toString()); + core + .ns(wf.metadata.namespace) + .po(req.params.nodeId) + .log.getStream({qs: {container: req.params.container, follow: true}}) + ); + streamServerEvents(req, res, logsSource, item => item.toString()); } catch (e) { if (e.code === 404) { // Try load logs from S3 if pod already deleted const artifact = await loadNodeArtifact(wf, req.params.nodeId, 'main-logs'); - streamServerEvents(req, res, Observable.from(artifact.data.toString('utf8').split('\n')), (line) => line); + streamServerEvents(req, res, Observable.from(artifact.data.toString('utf8').split('\n')), line => line); } else { throw e; } @@ -211,11 +230,12 @@ export function create( 
}); const serveIndex = (req: express.Request, res: express.Response) => { - fileToString(`${uiDist}/index.html`).then((content) => { - return content.replace(``, ``); - }) - .then((indexContent) => res.send(indexContent)) - .catch((err) => res.send(err)); + fileToString(`${uiDist}/index.html`) + .then(content => { + return content.replace(``, ``); + }) + .then(indexContent => res.send(indexContent)) + .catch(err => res.send(err)); }; app.get('/index.html', serveIndex); diff --git a/ui/src/api/console-proxy.ts b/ui/src/api/console-proxy.ts index c8794f445612..e019fa2999a9 100644 --- a/ui/src/api/console-proxy.ts +++ b/ui/src/api/console-proxy.ts @@ -20,45 +20,55 @@ function safeCallback(callback) { export function create(server: http.Server, core) { const wss = new WebSocket.Server({server}); - wss.on('connection', safeCallback((ws, req) => { - const location = url.parse(req.url, true); - const match = location - .path - .match(/\/api\/steps\/([^/]*)\/([^/]*)\/exec/); - if (match) { - const cmd = [location.query.cmd]; - const [, ns, pod] = match; - const apiUri = url - .parse(core.url) - .host; - let uri = `wss://${apiUri}/api/v1/namespaces/${ns}/pods/${pod}/exec?stdout=1&stdin=1&stderr=1&tty=1&container=main`; - cmd.forEach((subCmd) => uri += `&command=${encodeURIComponent(subCmd as string)}`); + wss.on( + 'connection', + safeCallback((ws, req) => { + const location = url.parse(req.url, true); + const match = location.path.match(/\/api\/steps\/([^/]*)\/([^/]*)\/exec/); + if (match) { + const cmd = [location.query.cmd]; + const [, ns, pod] = match; + const apiUri = url.parse(core.url).host; + let uri = `wss://${apiUri}/api/v1/namespaces/${ns}/pods/${pod}/exec?stdout=1&stdin=1&stderr=1&tty=1&container=main`; + cmd.forEach(subCmd => (uri += `&command=${encodeURIComponent(subCmd as string)}`)); - const kubeClient = new WebSocket(uri, 'base64.channel.k8s.io', { - headers: { - Authorization: `Bearer ${core.requestOptions.auth.bearer}`, - }, - }); + const kubeClient = new WebSocket(uri, 'base64.channel.k8s.io', { + headers: { + Authorization: `Bearer ${core.requestOptions.auth.bearer}` + } + }); - kubeClient.on('message', safeCallback((data) => { - if (data[0].match(/^[0-3]$/)) { - ws.send(utils.decodeBase64(data.slice(1))); - } - })); - kubeClient.on('close', safeCallback(() => { - ws.terminate(); - })); - kubeClient.on('error', safeCallback((err) => { - ws.send(err.message); - ws.terminate(); - })); - - ws.on('message', safeCallback((message) => { - kubeClient.send('0' + utils.encodeBase64(message)); - })); - } else { - ws.close(1002, 'Invalid URL'); - } - })); + kubeClient.on( + 'message', + safeCallback(data => { + if (data[0].match(/^[0-3]$/)) { + ws.send(utils.decodeBase64(data.slice(1))); + } + }) + ); + kubeClient.on( + 'close', + safeCallback(() => { + ws.terminate(); + }) + ); + kubeClient.on( + 'error', + safeCallback(err => { + ws.send(err.message); + ws.terminate(); + }) + ); + ws.on( + 'message', + safeCallback(message => { + kubeClient.send('0' + utils.encodeBase64(message)); + }) + ); + } else { + ws.close(1002, 'Invalid URL'); + } + }) + ); } diff --git a/ui/src/api/main.ts b/ui/src/api/main.ts index cf3a4a232514..287cd8a3a359 100644 --- a/ui/src/api/main.ts +++ b/ui/src/api/main.ts @@ -12,11 +12,11 @@ const port = argv.port || '8001'; console.log(`start argo-ui on ${argv.ip}:${argv.port}`); app.create( - argv.uiDist || path.join(__dirname, '..', '..', 'dist', 'app'), - argv.uiBaseHref || '/', - argv.inCluster === 'true', - argv.namespace || 'default', - 
argv.forceNamespaceIsolation === 'true', - argv.instanceId || undefined, - argv.crdVersion || 'v1alpha1', + argv.uiDist || path.join(__dirname, '..', '..', 'dist', 'app'), + argv.uiBaseHref || '/', + argv.inCluster === 'true', + argv.namespace || 'default', + argv.forceNamespaceIsolation === 'true', + argv.instanceId || undefined, + argv.crdVersion || 'v1alpha1' ).listen(port, ip); diff --git a/ui/src/api/utils.ts b/ui/src/api/utils.ts index 86ac11bf2b3f..9b2ce7a4a0a8 100644 --- a/ui/src/api/utils.ts +++ b/ui/src/api/utils.ts @@ -1,30 +1,34 @@ import * as express from 'express'; import {Observable, Observer} from 'rxjs'; -export function reactifyStream(stream, converter = (item) => item) { - return new Observable((observer: Observer < any >) => { - stream.on('data', (d) => observer.next(converter(d))); +export function reactifyStream(stream, converter = item => item) { + return new Observable((observer: Observer) => { + stream.on('data', d => observer.next(converter(d))); stream.on('end', () => observer.complete()); - stream.on('error', (e) => observer.error(e)); + stream.on('error', e => observer.error(e)); }); } export function reactifyStringStream(stream) { - return reactifyStream(stream, (item) => item.toString()); + return reactifyStream(stream, item => item.toString()); } -export function streamServerEvents (req: express.Request, res: express.Response, source: Observable , formatter: (input: T) => string) { +export function streamServerEvents(req: express.Request, res: express.Response, source: Observable, formatter: (input: T) => string) { res.setHeader('Content-Type', 'text/event-stream'); res.setHeader('Transfer-Encoding', 'chunked'); res.setHeader('X-Content-Type-Options', 'nosniff'); - const subscription = source.subscribe((info) => res.write(`data:${formatter(info)}\n\n`), (err) => { - res.set(200); - res.end(); - }, () => { - res.set(200); - res.end(); - }); + const subscription = source.subscribe( + info => res.write(`data:${formatter(info)}\n\n`), + err => { + res.set(200); + res.end(); + }, + () => { + res.set(200); + res.end(); + } + ); req.on('close', () => subscription.unsubscribe()); } diff --git a/ui/src/app/app.tsx b/ui/src/app/app.tsx index dabaf675a366..3e7ed4403fee 100644 --- a/ui/src/app/app.tsx +++ b/ui/src/app/app.tsx @@ -1,10 +1,10 @@ -import { AppContext, Layout, Notifications, NotificationsManager, Popup, PopupManager, PopupProps } from 'argo-ui'; +import {AppContext, Layout, Notifications, NotificationsManager, Popup, PopupManager, PopupProps} from 'argo-ui'; import createHistory from 'history/createBrowserHistory'; import * as PropTypes from 'prop-types'; import * as React from 'react'; -import { Redirect, Route, RouteComponentProps, Router, Switch } from 'react-router'; +import {Redirect, Route, RouteComponentProps, Router, Switch} from 'react-router'; -import { uiUrl } from './shared/base'; +import {uiUrl} from './shared/base'; export const history = createHistory(); @@ -14,25 +14,28 @@ import workflows from './workflows'; const workflowsUrl = uiUrl('workflows'); const helpUrl = uiUrl('help'); const timelineUrl = uiUrl('timeline'); -const routes: {[path: string]: { component: React.ComponentType> } } = { - [workflowsUrl]: { component: workflows.component }, - [helpUrl]: { component: help.component }, +const routes: {[path: string]: {component: React.ComponentType>}} = { + [workflowsUrl]: {component: workflows.component}, + [helpUrl]: {component: help.component} }; -const navItems = [{ - title: 'Timeline', - path: workflowsUrl, - iconClassName: 
'argo-icon-timeline', -}, { - title: 'Help', - path: helpUrl, - iconClassName: 'argo-icon-docs', -}]; +const navItems = [ + { + title: 'Timeline', + path: workflowsUrl, + iconClassName: 'argo-icon-timeline' + }, + { + title: 'Help', + path: helpUrl, + iconClassName: 'argo-icon-docs' + } +]; -export class App extends React.Component<{}, { popupProps: PopupProps }> { +export class App extends React.Component<{}, {popupProps: PopupProps}> { public static childContextTypes = { history: PropTypes.object, - apis: PropTypes.object, + apis: PropTypes.object }; private popupManager: PopupManager; @@ -40,35 +43,42 @@ export class App extends React.Component<{}, { popupProps: PopupProps }> { constructor(props: {}) { super(props); - this.state = { popupProps: null }; + this.state = {popupProps: null}; this.popupManager = new PopupManager(); this.notificationsManager = new NotificationsManager(); } public componentDidMount() { - this.popupManager.popupProps.subscribe((popupProps) => this.setState({ popupProps })); + this.popupManager.popupProps.subscribe(popupProps => this.setState({popupProps})); } public render() { return (
- {this.state.popupProps && } + {this.state.popupProps && } - - ; } - public componentWillMount() { - const router = (this.context as AppContext).router; - router.history.push(router.route.location.pathname.replace(timelineUrl, workflowsUrl)); + + ; + } + public componentWillMount() { + const router = (this.context as AppContext).router; + router.history.push(router.route.location.pathname.replace(timelineUrl, workflowsUrl)); + } + } } - } }/> + /> - - {Object.keys(routes).map((path) => { + + {Object.keys(routes).map(path => { const route = routes[path]; - return ; + return ; })} @@ -78,6 +88,6 @@ export class App extends React.Component<{}, { popupProps: PopupProps }> { } public getChildContext() { - return { history, apis: { popup: this.popupManager, notifications: this.notificationsManager } }; + return {history, apis: {popup: this.popupManager, notifications: this.notificationsManager}}; } } diff --git a/ui/src/app/help/components/help.tsx b/ui/src/app/help/components/help.tsx index 18647e240264..c376aa319795 100644 --- a/ui/src/app/help/components/help.tsx +++ b/ui/src/app/help/components/help.tsx @@ -1,4 +1,4 @@ -import { Page } from 'argo-ui'; +import {Page} from 'argo-ui'; import * as React from 'react'; require('./help.scss'); @@ -8,38 +8,47 @@ export const Help = () => (
-
-
-
+